From 1278cf66cf4b1c3d30e311200b50c45457c92baa Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: nvram: Replace nvram_* function exports with static functions Replace nvram_* functions with static functions in nvram.h. These will become wrappers for struct nvram_ops method calls. This patch effectively disables existing NVRAM functionality so as to allow the rest of the series to be bisected without build failures. That functionality is gradually re-implemented in subsequent patches. Replace the sole validate-checksum-and-read-byte sequence with a call to nvram_read() which will gain the same semantics in subsequent patches. Remove unused exports. Acked-by: Geert Uytterhoeven Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- include/linux/nvram.h | 32 +++++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 28bfb9ab94ca..eb5b52a9a747 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -2,13 +2,31 @@ #ifndef _LINUX_NVRAM_H #define _LINUX_NVRAM_H +#include <linux/errno.h> #include <uapi/linux/nvram.h> -/* __foo is foo without grabbing the rtc_lock - get it yourself */ -extern unsigned char __nvram_read_byte(int i); -extern unsigned char nvram_read_byte(int i); -extern void __nvram_write_byte(unsigned char c, int i); -extern void nvram_write_byte(unsigned char c, int i); -extern int __nvram_check_checksum(void); -extern int nvram_check_checksum(void); +static inline ssize_t nvram_get_size(void) +{ + return -ENODEV; +} + +static inline unsigned char nvram_read_byte(int addr) +{ + return 0xFF; +} + +static inline void nvram_write_byte(unsigned char val, int addr) +{ +} + +static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) +{ + return -ENODEV; +} + +static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) +{ + return -ENODEV; +} + #endif /* _LINUX_NVRAM_H */ -- cgit v1.2.3-71-gd317 From a084dbf6592c22468eb946014b2e731fb42da7a9 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: m68k/atari: Implement arch_nvram_ops struct By implementing an arch_nvram_ops struct, a platform can re-use the drivers/char/nvram.c module without needing any arch-specific code in that module. Atari does so here.
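(Illustration, not part of the patch: the ops-plus-wrapper pattern described above, with a placeholder method body. The function name and returned size are made up; the real Atari implementation follows in the diff below.)

static ssize_t example_nvram_get_size(void)
{
	return 50;	/* hypothetical NVRAM size in bytes */
}

/* The platform publishes its methods in one const struct... */
const struct nvram_ops arch_nvram_ops = {
	.get_size = example_nvram_get_size,
};
EXPORT_SYMBOL(arch_nvram_ops);

/* ...and the static wrappers in linux/nvram.h dispatch to it, so a
 * portable caller never touches the ops struct directly: */
static int example_probe(void)
{
	return nvram_get_size();	/* 50 here; -ENODEV if no method */
}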
Acked-by: Geert Uytterhoeven Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- arch/m68k/atari/nvram.c | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/nvram.h | 14 ++++++++++++++ 2 files changed, 63 insertions(+) (limited to 'include') diff --git a/arch/m68k/atari/nvram.c b/arch/m68k/atari/nvram.c index 1d767847ffa6..e75adebe6e7d 100644 --- a/arch/m68k/atari/nvram.c +++ b/arch/m68k/atari/nvram.c @@ -74,6 +74,55 @@ static void __nvram_set_checksum(void) __nvram_write_byte(sum, ATARI_CKS_LOC + 1); } +static ssize_t atari_nvram_read(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + *p = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + +static ssize_t atari_nvram_write(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + __nvram_write_byte(*p, i); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + +static ssize_t atari_nvram_get_size(void) +{ + return NVRAM_BYTES; +} + +const struct nvram_ops arch_nvram_ops = { + .read = atari_nvram_read, + .write = atari_nvram_write, + .get_size = atari_nvram_get_size, +}; +EXPORT_SYMBOL(arch_nvram_ops); + #ifdef CONFIG_PROC_FS static struct { unsigned char val; diff --git a/include/linux/nvram.h b/include/linux/nvram.h index eb5b52a9a747..a1e01dc89759 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,8 +5,18 @@ #include #include +struct nvram_ops { + ssize_t (*get_size)(void); + ssize_t (*read)(char *, size_t, loff_t *); + ssize_t (*write)(char *, size_t, loff_t *); +}; + +extern const struct nvram_ops arch_nvram_ops; + static inline ssize_t nvram_get_size(void) { + if (arch_nvram_ops.get_size) + return arch_nvram_ops.get_size(); return -ENODEV; } @@ -21,11 +31,15 @@ static inline void nvram_write_byte(unsigned char val, int addr) static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { + if (arch_nvram_ops.read) + return arch_nvram_ops.read(buf, count, ppos); return -ENODEV; } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { + if (arch_nvram_ops.write) + return arch_nvram_ops.write(buf, count, ppos); return -ENODEV; } -- cgit v1.2.3-71-gd317 From a156c7ba669c65b55c7afcc3994e1199cc0cad47 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Replace nvram_* extern declarations with standard header Remove the nvram_read_byte() and nvram_write_byte() declarations in powerpc/include/asm/nvram.h and use the cross-platform static functions in linux/nvram.h instead. 
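(Illustration, not part of the patch: what this change looks like from a driver's point of view, using matroxfb as in the diff below.)

/* Before: each user re-declared the accessor by hand. */
extern unsigned char nvram_read_byte(int);

/* After: the shared static inline comes from the common header. */
#include <linux/nvram.h>

static int example_get_cmode(int addr)
{
	return nvram_read_byte(addr);	/* same call site, one declaration */
}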
Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/nvram.h | 6 ------ arch/powerpc/kernel/setup_32.c | 25 +------------------------ drivers/char/generic_nvram.c | 1 + drivers/video/fbdev/matrox/matroxfb_base.c | 2 +- include/linux/nvram.h | 3 +++ 5 files changed, 6 insertions(+), 31 deletions(-) (limited to 'include') diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h index 09a518bb7c03..56a388da9c4f 100644 --- a/arch/powerpc/include/asm/nvram.h +++ b/arch/powerpc/include/asm/nvram.h @@ -98,10 +98,4 @@ extern int nvram_write_os_partition(struct nvram_os_partition *part, unsigned int err_type, unsigned int error_log_cnt); -/* Determine NVRAM size */ -extern ssize_t nvram_get_size(void); - -/* Normal access to NVRAM */ -extern unsigned char nvram_read_byte(int i); -extern void nvram_write_byte(unsigned char c, int i); #endif /* _ASM_POWERPC_NVRAM_H */ diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 947f904688b0..f5107796e2d7 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -149,30 +150,6 @@ __setup("l3cr=", ppc_setup_l3cr); #ifdef CONFIG_GENERIC_NVRAM -/* Generic nvram hooks used by drivers/char/gen_nvram.c */ -unsigned char nvram_read_byte(int addr) -{ - if (ppc_md.nvram_read_val) - return ppc_md.nvram_read_val(addr); - return 0xff; -} -EXPORT_SYMBOL(nvram_read_byte); - -void nvram_write_byte(unsigned char val, int addr) -{ - if (ppc_md.nvram_write_val) - ppc_md.nvram_write_val(addr, val); -} -EXPORT_SYMBOL(nvram_write_byte); - -ssize_t nvram_get_size(void) -{ - if (ppc_md.nvram_size) - return ppc_md.nvram_size(); - return -1; -} -EXPORT_SYMBOL(nvram_get_size); - void nvram_sync(void) { if (ppc_md.nvram_sync) diff --git a/drivers/char/generic_nvram.c b/drivers/char/generic_nvram.c index ff5394f47587..0c22b9503e84 100644 --- a/drivers/char/generic_nvram.c +++ b/drivers/char/generic_nvram.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 838869c6490c..0a4e5bad33f4 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -111,12 +111,12 @@ #include "matroxfb_g450.h" #include #include +#include #include #include #ifdef CONFIG_PPC_PMAC #include -unsigned char nvram_read_byte(int); static int default_vmode = VMODE_NVRAM; static int default_cmode = CMODE_NVRAM; #endif diff --git a/include/linux/nvram.h b/include/linux/nvram.h index a1e01dc89759..79431dab87a1 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -15,8 +15,11 @@ extern const struct nvram_ops arch_nvram_ops; static inline ssize_t nvram_get_size(void) { +#ifdef CONFIG_PPC +#else if (arch_nvram_ops.get_size) return arch_nvram_ops.get_size(); +#endif return -ENODEV; } -- cgit v1.2.3-71-gd317 From d5bbb5021ce8d9ff561c7469f5b4589ccb3bc4a6 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Adopt arch_nvram_ops NVRAMs on different platforms and architectures have different attributes and access methods. E.g. some platforms have byte-at-a-time accessor functions while others have byte-range accessor functions. Some have checksum functionality while others do not. 
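(Illustration, not part of the patch: the two accessor shapes just described, using the method signatures from the nvram_ops struct added below. A byte-range accessor can always be synthesized from a byte-at-a-time one, which is how the common code falls back later in the series.)

/* Byte-at-a-time: */
unsigned char (*read_byte)(int);
/* Byte-range: */
ssize_t (*read)(char *, size_t, loff_t *);

/* Sketch of the fallback, assuming rd() and size come from the arch: */
static ssize_t read_range(char *buf, size_t count, loff_t *ppos,
			  unsigned char (*rd)(int), ssize_t size)
{
	char *p = buf;
	loff_t i;

	for (i = *ppos; count > 0 && i < size; --count, ++i, ++p)
		*p = rd(i);
	*ppos = i;
	return p - buf;
}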
By calling ops struct methods via the common wrapper functions, the nvram module and other drivers can make use of the available NVRAM functionality in a portable way. Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- drivers/char/nvram.c | 30 ++++++++++++++++++++++++------ include/linux/nvram.h | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index c98775bfd896..2df391f78986 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -52,9 +52,11 @@ static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); static int nvram_open_cnt; /* #times opened */ static int nvram_open_mode; /* special open modes */ +static ssize_t nvram_size; #define NVRAM_WRITE 1 /* opened for writing (exclusive) */ #define NVRAM_EXCL 2 /* opened with O_EXCL */ +#ifdef CONFIG_X86 /* * These functions are provided to be called internally or by other parts of * the kernel. It's up to the caller to ensure correct checksum before reading @@ -145,6 +147,19 @@ void nvram_set_checksum(void) } #endif /* 0 */ +static ssize_t pc_nvram_get_size(void) +{ + return NVRAM_BYTES; +} + +const struct nvram_ops arch_nvram_ops = { + .read_byte = pc_nvram_read_byte, + .write_byte = pc_nvram_write_byte, + .get_size = pc_nvram_get_size, +}; +EXPORT_SYMBOL(arch_nvram_ops); +#endif /* CONFIG_X86 */ + /* * The are the file operation function for user access to /dev/nvram */ @@ -152,7 +167,7 @@ void nvram_set_checksum(void) static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin) { return generic_file_llseek_size(file, offset, origin, MAX_LFS_FILESIZE, - NVRAM_BYTES); + nvram_size); } static ssize_t nvram_misc_read(struct file *file, char __user *buf, @@ -303,8 +318,7 @@ static int nvram_misc_release(struct inode *inode, struct file *file) return 0; } -#ifdef CONFIG_PROC_FS - +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) static const char * const floppy_types[] = { "none", "5.25'' 360k", "5.25'' 1.2M", "3.5'' 720k", "3.5'' 1.44M", "3.5'' 2.88M", "3.5'' 2.88M" @@ -394,7 +408,7 @@ static int nvram_proc_read(struct seq_file *seq, void *offset) return 0; } -#endif /* CONFIG_PROC_FS */ +#endif /* CONFIG_X86 && CONFIG_PROC_FS */ static const struct file_operations nvram_misc_fops = { .owner = THIS_MODULE, @@ -416,13 +430,17 @@ static int __init nvram_module_init(void) { int ret; + nvram_size = nvram_get_size(); + if (nvram_size < 0) + return nvram_size; + ret = misc_register(&nvram_misc); if (ret) { pr_err("nvram: can't misc_register on minor=%d\n", NVRAM_MINOR); return ret; } -#ifdef CONFIG_PROC_FS +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) if (!proc_create_single("driver/nvram", 0, NULL, nvram_proc_read)) { pr_err("nvram: can't create /proc/driver/nvram\n"); misc_deregister(&nvram_misc); @@ -436,7 +454,7 @@ static int __init nvram_module_init(void) static void __exit nvram_module_exit(void) { -#ifdef CONFIG_PROC_FS +#if defined(CONFIG_X86) && defined(CONFIG_PROC_FS) remove_proc_entry("driver/nvram", NULL); #endif misc_deregister(&nvram_misc); diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 79431dab87a1..bb4ea8cc6ea6 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,8 +5,30 @@ #include #include +/** + * struct nvram_ops - NVRAM functionality made available to drivers + * @read: validate checksum (if any) then load a range of bytes from NVRAM + * @write: store a range of bytes to NVRAM then update checksum (if any) + * 
@read_byte: load a single byte from NVRAM + * @write_byte: store a single byte to NVRAM + * @get_size: return the fixed number of bytes in the NVRAM + * + * Architectures which provide an nvram ops struct need not implement all + * of these methods. If the NVRAM hardware can be accessed only one byte + * at a time then it may be sufficient to provide .read_byte and .write_byte. + * If the NVRAM has a checksum (and it is to be checked) the .read and + * .write methods can be used to implement that efficiently. + * + * Portable drivers may use the wrapper functions defined here. + * The nvram_read() and nvram_write() functions call the .read and .write + * methods when available and fall back on the .read_byte and .write_byte + * methods otherwise. + */ + struct nvram_ops { ssize_t (*get_size)(void); + unsigned char (*read_byte)(int); + void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); }; @@ -25,11 +47,21 @@ static inline ssize_t nvram_get_size(void) static inline unsigned char nvram_read_byte(int addr) { +#ifdef CONFIG_PPC +#else + if (arch_nvram_ops.read_byte) + return arch_nvram_ops.read_byte(addr); +#endif return 0xFF; } static inline void nvram_write_byte(unsigned char val, int addr) { +#ifdef CONFIG_PPC +#else + if (arch_nvram_ops.write_byte) + arch_nvram_ops.write_byte(val, addr); +#endif } static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) -- cgit v1.2.3-71-gd317 From 2d58636e0af724f38acad25246c1625efec36122 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Allow the set_checksum and initialize ioctls to be omitted The drivers/char/nvram.c module has previously supported only RTC "CMOS" NVRAM, for which it provides appropriate checksum ioctls. Make these ioctls optional so the module can be re-used with other kinds of NVRAM. The ops struct methods that implement the ioctls now return error codes so that a multi-platform kernel binary can do the right thing when running on hardware without a suitable NVRAM. 
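(Illustration, not part of the patch: what the optional ioctls mean for user space. A multi-platform binary can issue the checksum ioctl unconditionally and treat ENOTTY as "this NVRAM has no checksum". The function name here is illustrative.)

#include <errno.h>
#include <sys/ioctl.h>
#include <linux/nvram.h>	/* NVRAM_INIT, NVRAM_SETCKS */

static int fix_checksum(int fd)
{
	if (ioctl(fd, NVRAM_SETCKS) == 0)
		return 0;	/* checksum rewritten */
	if (errno == ENOTTY)
		return 0;	/* no checksum method on this platform: fine */
	return -1;		/* real error, e.g. EACCES without CAP_SYS_ADMIN */
}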
Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- drivers/char/nvram.c | 70 +++++++++++++++++++++++++++++---------------------- include/linux/nvram.h | 2 ++ 2 files changed, 42 insertions(+), 30 deletions(-) (limited to 'include') diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index 2df391f78986..f88ef41d0598 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -136,16 +136,25 @@ static void __nvram_set_checksum(void) __nvram_write_byte(sum & 0xff, PC_CKS_LOC + 1); } -#if 0 -void nvram_set_checksum(void) +static long pc_nvram_set_checksum(void) { - unsigned long flags; + spin_lock_irq(&rtc_lock); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + return 0; +} - spin_lock_irqsave(&rtc_lock, flags); +static long pc_nvram_initialize(void) +{ + ssize_t i; + + spin_lock_irq(&rtc_lock); + for (i = 0; i < NVRAM_BYTES; ++i) + __nvram_write_byte(0, i); __nvram_set_checksum(); - spin_unlock_irqrestore(&rtc_lock, flags); + spin_unlock_irq(&rtc_lock); + return 0; } -#endif /* 0 */ static ssize_t pc_nvram_get_size(void) { @@ -156,6 +165,8 @@ const struct nvram_ops arch_nvram_ops = { .read_byte = pc_nvram_read_byte, .write_byte = pc_nvram_write_byte, .get_size = pc_nvram_get_size, + .set_checksum = pc_nvram_set_checksum, + .initialize = pc_nvram_initialize, }; EXPORT_SYMBOL(arch_nvram_ops); #endif /* CONFIG_X86 */ @@ -241,51 +252,50 @@ checksum_err: static long nvram_misc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { - int i; + long ret = -ENOTTY; switch (cmd) { - case NVRAM_INIT: /* initialize NVRAM contents and checksum */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; - mutex_lock(&nvram_mutex); - spin_lock_irq(&rtc_lock); - - for (i = 0; i < NVRAM_BYTES; ++i) - __nvram_write_byte(0, i); - __nvram_set_checksum(); - - spin_unlock_irq(&rtc_lock); - mutex_unlock(&nvram_mutex); - return 0; - + if (arch_nvram_ops.initialize != NULL) { + mutex_lock(&nvram_mutex); + ret = arch_nvram_ops.initialize(); + mutex_unlock(&nvram_mutex); + } + break; case NVRAM_SETCKS: /* just set checksum, contents unchanged (maybe useful after * checksum garbaged somehow...) */ if (!capable(CAP_SYS_ADMIN)) return -EACCES; - mutex_lock(&nvram_mutex); - spin_lock_irq(&rtc_lock); - __nvram_set_checksum(); - spin_unlock_irq(&rtc_lock); - mutex_unlock(&nvram_mutex); - return 0; - - default: - return -ENOTTY; + if (arch_nvram_ops.set_checksum != NULL) { + mutex_lock(&nvram_mutex); + ret = arch_nvram_ops.set_checksum(); + mutex_unlock(&nvram_mutex); + } + break; } + return ret; } static int nvram_misc_open(struct inode *inode, struct file *file) { spin_lock(&nvram_state_lock); + /* Prevent multiple readers/writers if desired. */ if ((nvram_open_cnt && (file->f_flags & O_EXCL)) || - (nvram_open_mode & NVRAM_EXCL) || - ((file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE))) { + (nvram_open_mode & NVRAM_EXCL)) { + spin_unlock(&nvram_state_lock); + return -EBUSY; + } + + /* Prevent multiple writers if the set_checksum ioctl is implemented. 
*/ + if ((arch_nvram_ops.set_checksum != NULL) && + (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) { spin_unlock(&nvram_state_lock); return -EBUSY; } diff --git a/include/linux/nvram.h b/include/linux/nvram.h index bb4ea8cc6ea6..31c763087746 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -31,6 +31,8 @@ struct nvram_ops { void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); + long (*initialize)(void); + long (*set_checksum)(void); }; extern const struct nvram_ops arch_nvram_ops; -- cgit v1.2.3-71-gd317 From 109b3a89a7c48405d61a05d7a1720581a4f1574c Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: char/nvram: Implement NVRAM read/write methods Refactor the RTC "CMOS" NVRAM functions so that they can be used as arch_nvram_ops methods. Checksumming logic is moved from the misc device operations to the nvram read/write operations. This makes the misc device implementation more generic. This preserves the locking mechanism such that "read if checksum valid" and "write and update checksum" remain atomic operations. Some platforms implement byte-range read/write methods which are similar to file_operations struct methods. Other platforms provide only byte-at-a-time methods. The former are more efficient but may be unavailable so fall back on the latter methods when necessary. Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- drivers/char/nvram.c | 120 +++++++++++++++++++++++++++++++------------------- include/linux/nvram.h | 32 +++++++++++++- 2 files changed, 104 insertions(+), 48 deletions(-) (limited to 'include') diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index f88ef41d0598..adcc213c331e 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -41,6 +41,7 @@ #include #include #include +#include #include #include #include @@ -161,7 +162,46 @@ static ssize_t pc_nvram_get_size(void) return NVRAM_BYTES; } +static ssize_t pc_nvram_read(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + *p = __nvram_read_byte(i); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + +static ssize_t pc_nvram_write(char *buf, size_t count, loff_t *ppos) +{ + char *p = buf; + loff_t i; + + spin_lock_irq(&rtc_lock); + if (!__nvram_check_checksum()) { + spin_unlock_irq(&rtc_lock); + return -EIO; + } + for (i = *ppos; count > 0 && i < NVRAM_BYTES; --count, ++i, ++p) + __nvram_write_byte(*p, i); + __nvram_set_checksum(); + spin_unlock_irq(&rtc_lock); + + *ppos = i; + return p - buf; +} + const struct nvram_ops arch_nvram_ops = { + .read = pc_nvram_read, + .write = pc_nvram_write, .read_byte = pc_nvram_read_byte, .write_byte = pc_nvram_write_byte, .get_size = pc_nvram_get_size, @@ -184,69 +224,57 @@ static loff_t nvram_misc_llseek(struct file *file, loff_t offset, int origin) static ssize_t nvram_misc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - unsigned char contents[NVRAM_BYTES]; - unsigned i = *ppos; - unsigned char *tmp; - - spin_lock_irq(&rtc_lock); + char *tmp; + ssize_t ret; - if (!__nvram_check_checksum()) - goto checksum_err; - for (tmp = contents; count-- > 0 && i < NVRAM_BYTES; ++i, ++tmp) - *tmp = __nvram_read_byte(i); + if (!access_ok(buf, count)) + return -EFAULT; 
+ if (*ppos >= nvram_size) + return 0; - spin_unlock_irq(&rtc_lock); + count = min_t(size_t, count, nvram_size - *ppos); + count = min_t(size_t, count, PAGE_SIZE); - if (copy_to_user(buf, contents, tmp - contents)) - return -EFAULT; + tmp = kmalloc(count, GFP_KERNEL); + if (!tmp) + return -ENOMEM; - *ppos = i; + ret = nvram_read(tmp, count, ppos); + if (ret <= 0) + goto out; - return tmp - contents; + if (copy_to_user(buf, tmp, ret)) { + *ppos -= ret; + ret = -EFAULT; + } -checksum_err: - spin_unlock_irq(&rtc_lock); - return -EIO; +out: + kfree(tmp); + return ret; } static ssize_t nvram_misc_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - unsigned char contents[NVRAM_BYTES]; - unsigned i = *ppos; - unsigned char *tmp; - - if (i >= NVRAM_BYTES) - return 0; /* Past EOF */ - - if (count > NVRAM_BYTES - i) - count = NVRAM_BYTES - i; - if (count > NVRAM_BYTES) - return -EFAULT; /* Can't happen, but prove it to gcc */ + char *tmp; + ssize_t ret; - if (copy_from_user(contents, buf, count)) + if (!access_ok(buf, count)) return -EFAULT; + if (*ppos >= nvram_size) + return 0; - spin_lock_irq(&rtc_lock); - - if (!__nvram_check_checksum()) - goto checksum_err; - - for (tmp = contents; count--; ++i, ++tmp) - __nvram_write_byte(*tmp, i); + count = min_t(size_t, count, nvram_size - *ppos); + count = min_t(size_t, count, PAGE_SIZE); - __nvram_set_checksum(); - - spin_unlock_irq(&rtc_lock); + tmp = memdup_user(buf, count); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); - *ppos = i; - - return tmp - contents; - -checksum_err: - spin_unlock_irq(&rtc_lock); - return -EIO; + ret = nvram_write(tmp, count, ppos); + kfree(tmp); + return ret; } static long nvram_misc_ioctl(struct file *file, unsigned int cmd, diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 31c763087746..9df85703735c 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -66,18 +66,46 @@ static inline void nvram_write_byte(unsigned char val, int addr) #endif } +static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos) +{ + ssize_t nvram_size = nvram_get_size(); + loff_t i; + char *p = buf; + + if (nvram_size < 0) + return nvram_size; + for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) + *p = nvram_read_byte(i); + *ppos = i; + return p - buf; +} + +static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) +{ + ssize_t nvram_size = nvram_get_size(); + loff_t i; + char *p = buf; + + if (nvram_size < 0) + return nvram_size; + for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) + nvram_write_byte(*p, i); + *ppos = i; + return p - buf; +} + static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { if (arch_nvram_ops.read) return arch_nvram_ops.read(buf, count, ppos); - return -ENODEV; + return nvram_read_bytes(buf, count, ppos); } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { if (arch_nvram_ops.write) return arch_nvram_ops.write(buf, count, ppos); - return -ENODEV; + return nvram_write_bytes(buf, count, ppos); } #endif /* _LINUX_NVRAM_H */ -- cgit v1.2.3-71-gd317 From aefcb7460e0b5f35f72601b7a98eec5ca1639cf2 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: m68k/mac: Fix PRAM accessors PMU-based m68k Macs pre-date PowerMac-style NVRAM. Use the appropriate PMU commands. Also implement the missing XPRAM accessors for VIA-based Macs. 
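(Illustration, not part of the patch: how the two-byte XPRAM command added below encodes an offset. Per the RTC_CMD_XPRAM_ARG() macro in the diff, the 8-bit XPRAM address is split into its top three bits and low five bits. A worked example for offset 0x42:)

/* RTC_CMD_XPRAM_ARG(0x42):
 *   (0x42 & 0xE0) << 3  =  0x40 << 3  =  0x200
 *   (0x42 & 0x1F) << 2  =  0x02 << 2  =  0x008
 *   result              =  0x208
 * This is OR'd with RTC_CMD_XPRAM_READ or RTC_CMD_XPRAM_WRITE to form
 * the 16-bit command passed to via_rtc_command().
 */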
Acked-by: Geert Uytterhoeven Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- arch/m68k/mac/misc.c | 43 +++++++++++++++++++++++++++++++++---------- include/uapi/linux/pmu.h | 2 ++ 2 files changed, 35 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/arch/m68k/mac/misc.c b/arch/m68k/mac/misc.c index af000a015f68..d016ca2e0d10 100644 --- a/arch/m68k/mac/misc.c +++ b/arch/m68k/mac/misc.c @@ -66,23 +66,22 @@ static unsigned char pmu_pram_read_byte(int offset) { struct adb_request req; - if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM, - (offset >> 8) & 0xFF, offset & 0xFF) < 0) + if (pmu_request(&req, NULL, 3, PMU_READ_XPRAM, + offset & 0xFF, 1) < 0) return 0; - while (!req.complete) - pmu_poll(); - return req.reply[3]; + pmu_wait_complete(&req); + + return req.reply[0]; } static void pmu_pram_write_byte(unsigned char data, int offset) { struct adb_request req; - if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM, - (offset >> 8) & 0xFF, offset & 0xFF, data) < 0) + if (pmu_request(&req, NULL, 4, PMU_WRITE_XPRAM, + offset & 0xFF, 1, data) < 0) return; - while (!req.complete) - pmu_poll(); + pmu_wait_complete(&req); } #endif /* CONFIG_ADB_PMU */ @@ -151,6 +150,16 @@ static void via_rtc_send(__u8 data) #define RTC_REG_SECONDS_3 3 #define RTC_REG_WRITE_PROTECT 13 +/* + * Inside Mac has no information about two-byte RTC commands but + * the MAME/MESS source code has the essentials. + */ + +#define RTC_REG_XPRAM 14 +#define RTC_CMD_XPRAM_READ (RTC_CMD_READ(RTC_REG_XPRAM) << 8) +#define RTC_CMD_XPRAM_WRITE (RTC_CMD_WRITE(RTC_REG_XPRAM) << 8) +#define RTC_CMD_XPRAM_ARG(a) (((a & 0xE0) << 3) | ((a & 0x1F) << 2)) + /* * Execute a VIA PRAM/RTC command. For read commands * data should point to a one-byte buffer for the @@ -198,11 +207,25 @@ static void via_rtc_command(int command, __u8 *data) static unsigned char via_pram_read_byte(int offset) { - return 0; + unsigned char temp; + + via_rtc_command(RTC_CMD_XPRAM_READ | RTC_CMD_XPRAM_ARG(offset), &temp); + + return temp; } static void via_pram_write_byte(unsigned char data, int offset) { + unsigned char temp; + + temp = 0x55; + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); + + temp = data; + via_rtc_command(RTC_CMD_XPRAM_WRITE | RTC_CMD_XPRAM_ARG(offset), &temp); + + temp = 0x55 | RTC_FLG_WRITE_PROTECT; + via_rtc_command(RTC_CMD_WRITE(RTC_REG_WRITE_PROTECT), &temp); } /* diff --git a/include/uapi/linux/pmu.h b/include/uapi/linux/pmu.h index 97256f90e6df..f2fc1bd80017 100644 --- a/include/uapi/linux/pmu.h +++ b/include/uapi/linux/pmu.h @@ -19,7 +19,9 @@ #define PMU_POWER_CTRL 0x11 /* control power of some devices */ #define PMU_ADB_CMD 0x20 /* send ADB packet */ #define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */ +#define PMU_WRITE_XPRAM 0x32 /* write eXtended Parameter RAM */ #define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */ +#define PMU_READ_XPRAM 0x3a /* read eXtended Parameter RAM */ #define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */ #define PMU_SET_RTC 0x30 /* set real-time clock */ #define PMU_READ_RTC 0x38 /* read real-time clock */ -- cgit v1.2.3-71-gd317 From 95ac14b8a32817dcd1f13ae4787891484966d2d5 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Implement nvram ioctls Add the powerpc-specific ioctls to the nvram module. This allows the nvram module to replace the generic_nvram module. 
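(Illustration, not part of the patch: the PowerMac partition-lookup ioctl wired up below, as seen from user space. Per the handler in the diff, the ioctl takes a partition id in and writes the NVRAM byte offset back through the same argument; the header names here are assumed from the powerpc uapi and may need checking.)

#include <sys/ioctl.h>
#include <asm/nvram.h>	/* IOC_NVRAM_GET_OFFSET, pmac_nvram_OF */

static long of_partition_offset(int fd)
{
	int arg = pmac_nvram_OF;	/* partition id in... */

	if (ioctl(fd, IOC_NVRAM_GET_OFFSET, &arg) < 0)
		return -1;
	return arg;			/* ...byte offset out */
}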
Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- drivers/char/nvram.c | 38 ++++++++++++++++++++++++++++++++++++++ include/linux/nvram.h | 2 ++ 2 files changed, 40 insertions(+) (limited to 'include') diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c index c9e295d73dc5..944f05fddacd 100644 --- a/drivers/char/nvram.c +++ b/drivers/char/nvram.c @@ -48,6 +48,9 @@ #include #include +#ifdef CONFIG_PPC +#include +#endif static DEFINE_MUTEX(nvram_mutex); static DEFINE_SPINLOCK(nvram_state_lock); @@ -283,6 +286,38 @@ static long nvram_misc_ioctl(struct file *file, unsigned int cmd, long ret = -ENOTTY; switch (cmd) { +#ifdef CONFIG_PPC + case OBSOLETE_PMAC_NVRAM_GET_OFFSET: + pr_warn("nvram: Using obsolete PMAC_NVRAM_GET_OFFSET ioctl\n"); + /* fall through */ + case IOC_NVRAM_GET_OFFSET: + ret = -EINVAL; +#ifdef CONFIG_PPC_PMAC + if (machine_is(powermac)) { + int part, offset; + + if (copy_from_user(&part, (void __user *)arg, + sizeof(part)) != 0) + return -EFAULT; + if (part < pmac_nvram_OF || part > pmac_nvram_NR) + return -EINVAL; + offset = pmac_get_partition(part); + if (copy_to_user((void __user *)arg, + &offset, sizeof(offset)) != 0) + return -EFAULT; + ret = 0; + } +#endif + break; + case IOC_NVRAM_SYNC: + if (ppc_md.nvram_sync != NULL) { + mutex_lock(&nvram_mutex); + ppc_md.nvram_sync(); + mutex_unlock(&nvram_mutex); + } + ret = 0; + break; +#elif defined(CONFIG_X86) || defined(CONFIG_M68K) case NVRAM_INIT: /* initialize NVRAM contents and checksum */ if (!capable(CAP_SYS_ADMIN)) @@ -306,6 +341,7 @@ static long nvram_misc_ioctl(struct file *file, unsigned int cmd, mutex_unlock(&nvram_mutex); } break; +#endif /* CONFIG_X86 || CONFIG_M68K */ } return ret; } @@ -321,12 +357,14 @@ static int nvram_misc_open(struct inode *inode, struct file *file) return -EBUSY; } +#if defined(CONFIG_X86) || defined(CONFIG_M68K) /* Prevent multiple writers if the set_checksum ioctl is implemented. */ if ((arch_nvram_ops.set_checksum != NULL) && (file->f_mode & FMODE_WRITE) && (nvram_open_mode & NVRAM_WRITE)) { spin_unlock(&nvram_state_lock); return -EBUSY; } +#endif if (file->f_flags & O_EXCL) nvram_open_mode |= NVRAM_EXCL; diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 9df85703735c..9e3a957c8f1f 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -31,8 +31,10 @@ struct nvram_ops { void (*write_byte)(unsigned char, int); ssize_t (*read)(char *, size_t, loff_t *); ssize_t (*write)(char *, size_t, loff_t *); +#if defined(CONFIG_X86) || defined(CONFIG_M68K) long (*initialize)(void); long (*set_checksum)(void); +#endif }; extern const struct nvram_ops arch_nvram_ops; -- cgit v1.2.3-71-gd317 From f9c3a570f5fc584f2ca2dd222d1b8c8537fc55f6 Mon Sep 17 00:00:00 2001 From: Finn Thain Date: Tue, 15 Jan 2019 15:18:56 +1100 Subject: powerpc: Enable HAVE_ARCH_NVRAM_OPS and disable GENERIC_NVRAM Switch PPC32 kernels from the generic_nvram module to the nvram module. Also fix a theoretical bug where CHRP omits the chrp_nvram_init() call when CONFIG_NVRAM_MODULE=m. 
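(Illustration, not part of the patch: why the CHRP bug existed and how the hunks below fix it. With a tristate symbol set to m, the preprocessor defines CONFIG_NVRAM_MODULE rather than CONFIG_NVRAM, so a plain #ifdef guard compiled chrp_nvram_init() out.)

/* Old guard: false when CONFIG_NVRAM=m, so the init call vanished. */
#ifdef CONFIG_NVRAM
	chrp_nvram_init();
#endif

/* New guard: IS_ENABLED() is 1 for both =y and =m. */
#if IS_ENABLED(CONFIG_NVRAM)
	chrp_nvram_init();
#endif

# On the Makefile side, GNU make's $(VAR:m=y) substitution reference
# rewrites "m" to "y", so the built-in chrp nvram.o is kept even when
# the char driver itself is modular:
obj-$(CONFIG_NVRAM:m=y) += nvram.o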
Tested-by: Stan Johnson Signed-off-by: Finn Thain Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/Kconfig | 6 +----- arch/powerpc/include/asm/nvram.h | 3 --- arch/powerpc/kernel/setup_32.c | 11 ----------- arch/powerpc/platforms/chrp/Makefile | 2 +- arch/powerpc/platforms/chrp/setup.c | 2 +- arch/powerpc/platforms/powermac/setup.c | 3 +-- drivers/char/Kconfig | 19 +++++++++---------- include/linux/nvram.h | 20 ++++++++++++++++++++ 8 files changed, 33 insertions(+), 33 deletions(-) (limited to 'include') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 2890d36eb531..f62e6a3f9c4e 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -178,6 +178,7 @@ config PPC select HAVE_ARCH_KGDB select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT + select HAVE_ARCH_NVRAM_OPS if PPC32 select HAVE_ARCH_SECCOMP_FILTER select HAVE_ARCH_TRACEHOOK select HAVE_CBPF_JIT if !PPC64 @@ -274,11 +275,6 @@ config SYSVIPC_COMPAT depends on COMPAT && SYSVIPC default y -# All PPC32s use generic nvram driver through ppc_md -config GENERIC_NVRAM - bool - default y if PPC32 - config SCHED_OMIT_FRAME_POINTER bool default y diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h index 56a388da9c4f..629a5cdcc865 100644 --- a/arch/powerpc/include/asm/nvram.h +++ b/arch/powerpc/include/asm/nvram.h @@ -78,9 +78,6 @@ extern int pmac_get_partition(int partition); extern u8 pmac_xpram_read(int xpaddr); extern void pmac_xpram_write(int xpaddr, u8 data); -/* Synchronize NVRAM */ -extern void nvram_sync(void); - /* Initialize NVRAM OS partition */ extern int __init nvram_init_os_partition(struct nvram_os_partition *part); diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index f5107796e2d7..c31082233a25 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c @@ -148,17 +148,6 @@ static int __init ppc_setup_l3cr(char *str) } __setup("l3cr=", ppc_setup_l3cr); -#ifdef CONFIG_GENERIC_NVRAM - -void nvram_sync(void) -{ - if (ppc_md.nvram_sync) - ppc_md.nvram_sync(); -} -EXPORT_SYMBOL(nvram_sync); - -#endif /* CONFIG_NVRAM */ - static int __init ppc_init(void) { /* clear the progress line */ diff --git a/arch/powerpc/platforms/chrp/Makefile b/arch/powerpc/platforms/chrp/Makefile index 4b3bfadc70fa..dc3465cc8bc6 100644 --- a/arch/powerpc/platforms/chrp/Makefile +++ b/arch/powerpc/platforms/chrp/Makefile @@ -1,3 +1,3 @@ obj-y += setup.o time.o pegasos_eth.o pci.o obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_NVRAM) += nvram.o +obj-$(CONFIG_NVRAM:m=y) += nvram.o diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index e66644e0fb40..e8e804289c8e 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c @@ -550,7 +550,7 @@ static void __init chrp_init_IRQ(void) static void __init chrp_init2(void) { -#ifdef CONFIG_NVRAM +#if IS_ENABLED(CONFIG_NVRAM) chrp_nvram_init(); #endif diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 2e8221e20ee8..b47f49cf9c4d 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -316,8 +316,7 @@ static void __init pmac_setup_arch(void) find_via_pmu(); smu_init(); -#if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ - defined(CONFIG_PPC64) +#if IS_ENABLED(CONFIG_NVRAM) || defined(CONFIG_PPC64) pmac_nvram_init(); #endif #ifdef CONFIG_PPC32 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index ce9979529cf3..72866a004f07 
100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -244,25 +244,24 @@ source "drivers/char/hw_random/Kconfig" config NVRAM tristate "/dev/nvram support" - depends on X86 || GENERIC_NVRAM || HAVE_ARCH_NVRAM_OPS - default M68K + depends on X86 || HAVE_ARCH_NVRAM_OPS + default M68K || PPC ---help--- If you say Y here and create a character special file /dev/nvram with major number 10 and minor number 144 using mknod ("man mknod"), - you get read and write access to the extra bytes of non-volatile - memory in the real time clock (RTC), which is contained in every PC - and most Ataris. The actual number of bytes varies, depending on the - nvram in the system, but is usually 114 (128-14 for the RTC). - - This memory is conventionally called "CMOS RAM" on PCs and "NVRAM" - on Ataris. /dev/nvram may be used to view settings there, or to - change them (with some utility). It could also be used to frequently + you get read and write access to the non-volatile memory. + + /dev/nvram may be used to view settings in NVRAM or to change them + (with some utility). It could also be used to frequently save a few bits of very important data that may not be lost over power-off and for which writing to disk is too insecure. Note however that most NVRAM space in a PC belongs to the BIOS and you should NEVER idly tamper with it. See Ralf Brown's interrupt list for a guide to the use of CMOS bytes by your BIOS. + This memory is conventionally called "NVRAM" on PowerPC machines, + "CMOS RAM" on PCs, "NVRAM" on Ataris and "PRAM" on Macintoshes. + To compile this driver as a module, choose M here: the module will be called nvram. diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 9e3a957c8f1f..d29d9c93a927 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h @@ -5,6 +5,10 @@ #include #include +#ifdef CONFIG_PPC +#include +#endif + /** * struct nvram_ops - NVRAM functionality made available to drivers * @read: validate checksum (if any) then load a range of bytes from NVRAM @@ -42,6 +46,8 @@ extern const struct nvram_ops arch_nvram_ops; static inline ssize_t nvram_get_size(void) { #ifdef CONFIG_PPC + if (ppc_md.nvram_size) + return ppc_md.nvram_size(); #else if (arch_nvram_ops.get_size) return arch_nvram_ops.get_size(); @@ -52,6 +58,8 @@ static inline ssize_t nvram_get_size(void) static inline unsigned char nvram_read_byte(int addr) { #ifdef CONFIG_PPC + if (ppc_md.nvram_read_val) + return ppc_md.nvram_read_val(addr); #else if (arch_nvram_ops.read_byte) return arch_nvram_ops.read_byte(addr); @@ -62,6 +70,8 @@ static inline unsigned char nvram_read_byte(int addr) static inline void nvram_write_byte(unsigned char val, int addr) { #ifdef CONFIG_PPC + if (ppc_md.nvram_write_val) + ppc_md.nvram_write_val(addr, val); #else if (arch_nvram_ops.write_byte) arch_nvram_ops.write_byte(val, addr); @@ -98,15 +108,25 @@ static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) { +#ifdef CONFIG_PPC + if (ppc_md.nvram_read) + return ppc_md.nvram_read(buf, count, ppos); +#else if (arch_nvram_ops.read) return arch_nvram_ops.read(buf, count, ppos); +#endif return nvram_read_bytes(buf, count, ppos); } static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) { +#ifdef CONFIG_PPC + if (ppc_md.nvram_write) + return ppc_md.nvram_write(buf, count, ppos); +#else if (arch_nvram_ops.write) return arch_nvram_ops.write(buf, count, ppos); +#endif return nvram_write_bytes(buf, count, ppos); } -- cgit 
v1.2.3-71-gd317 From 11f1ceca7031deefc1a34236ab7b94360016b71d Mon Sep 17 00:00:00 2001 From: Georgi Djakov Date: Wed, 16 Jan 2019 18:10:56 +0200 Subject: interconnect: Add generic on-chip interconnect API This patch introduces a new API to get requirements and configure the interconnect buses across the entire chipset to fit with the current demand. The API is using a consumer/provider-based model, where the providers are the interconnect buses and the consumers could be various drivers. The consumers request interconnect resources (path) between endpoints and set the desired constraints on this data flow path. The providers receive requests from consumers and aggregate these requests for all master-slave pairs on that path. Then the providers configure each node along the path to support a bandwidth that satisfies all bandwidth requests that cross through that node. The topology could be complicated and multi-tiered and is SoC specific. Reviewed-by: Evan Green Signed-off-by: Georgi Djakov Signed-off-by: Greg Kroah-Hartman --- Documentation/interconnect/interconnect.rst | 94 +++++ drivers/Kconfig | 2 + drivers/Makefile | 1 + drivers/interconnect/Kconfig | 10 + drivers/interconnect/Makefile | 5 + drivers/interconnect/core.c | 567 ++++++++++++++++++++++++++++ include/linux/interconnect-provider.h | 125 ++++++ include/linux/interconnect.h | 52 +++ 8 files changed, 856 insertions(+) create mode 100644 Documentation/interconnect/interconnect.rst create mode 100644 drivers/interconnect/Kconfig create mode 100644 drivers/interconnect/Makefile create mode 100644 drivers/interconnect/core.c create mode 100644 include/linux/interconnect-provider.h create mode 100644 include/linux/interconnect.h (limited to 'include') diff --git a/Documentation/interconnect/interconnect.rst b/Documentation/interconnect/interconnect.rst new file mode 100644 index 000000000000..b8107dcc4cd3 --- /dev/null +++ b/Documentation/interconnect/interconnect.rst @@ -0,0 +1,94 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================================== +GENERIC SYSTEM INTERCONNECT SUBSYSTEM +===================================== + +Introduction +------------ + +This framework is designed to provide a standard kernel interface to control +the settings of the interconnects on an SoC. These settings can be throughput, +latency and priority between multiple interconnected devices or functional +blocks. This can be controlled dynamically in order to save power or provide +maximum performance. + +The interconnect bus is hardware with configurable parameters, which can be +set on a data path according to the requests received from various drivers. +An example of interconnect buses are the interconnects between various +components or functional blocks in chipsets. There can be multiple interconnects +on an SoC that can be multi-tiered. + +Below is a simplified diagram of a real-world SoC interconnect bus topology. 
+ +:: + + +----------------+ +----------------+ + | HW Accelerator |--->| M NoC |<---------------+ + +----------------+ +----------------+ | + | | +------------+ + +-----+ +-------------+ V +------+ | | + | DDR | | +--------+ | PCIe | | | + +-----+ | | Slaves | +------+ | | + ^ ^ | +--------+ | | C NoC | + | | V V | | + +------------------+ +------------------------+ | | +-----+ + | |-->| |-->| |-->| CPU | + | |-->| |<--| | +-----+ + | Mem NoC | | S NoC | +------------+ + | |<--| |---------+ | + | |<--| |<------+ | | +--------+ + +------------------+ +------------------------+ | | +-->| Slaves | + ^ ^ ^ ^ ^ | | +--------+ + | | | | | | V + +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+ + | CPUs | | | GPU | | DSP | | Masters |-->| P NoC |-->| Slaves | + +------+ | +-----+ +-----+ +---------+ +----------------+ +--------+ + | + +-------+ + | Modem | + +-------+ + +Terminology +----------- + +Interconnect provider is the software definition of the interconnect hardware. +The interconnect providers on the above diagram are M NoC, S NoC, C NoC, P NoC +and Mem NoC. + +Interconnect node is the software definition of the interconnect hardware +port. Each interconnect provider consists of multiple interconnect nodes, +which are connected to other SoC components including other interconnect +providers. The point on the diagram where the CPUs connect to the memory is +called an interconnect node, which belongs to the Mem NoC interconnect provider. + +Interconnect endpoints are the first or the last element of the path. Every +endpoint is a node, but not every node is an endpoint. + +Interconnect path is everything between two endpoints including all the nodes +that have to be traversed to reach from a source to destination node. It may +include multiple master-slave pairs across several interconnect providers. + +Interconnect consumers are the entities which make use of the data paths exposed +by the providers. The consumers send requests to providers requesting various +throughput, latency and priority. Usually the consumers are device drivers, that +send request based on their needs. An example for a consumer is a video decoder +that supports various formats and image sizes. + +Interconnect providers +---------------------- + +Interconnect provider is an entity that implements methods to initialize and +configure interconnect bus hardware. The interconnect provider drivers should +be registered with the interconnect provider core. + +.. kernel-doc:: include/linux/interconnect-provider.h + +Interconnect consumers +---------------------- + +Interconnect consumers are the clients which use the interconnect APIs to +get paths between endpoints and set their bandwidth/latency/QoS requirements +for these interconnect paths. + +.. 
kernel-doc:: include/linux/interconnect.h diff --git a/drivers/Kconfig b/drivers/Kconfig index 4f9f99057ff8..45f9decb9848 100644 --- a/drivers/Kconfig +++ b/drivers/Kconfig @@ -228,4 +228,6 @@ source "drivers/siox/Kconfig" source "drivers/slimbus/Kconfig" +source "drivers/interconnect/Kconfig" + endmenu diff --git a/drivers/Makefile b/drivers/Makefile index e1ce029d28fd..bb15b9d0e793 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -186,3 +186,4 @@ obj-$(CONFIG_MULTIPLEXER) += mux/ obj-$(CONFIG_UNISYS_VISORBUS) += visorbus/ obj-$(CONFIG_SIOX) += siox/ obj-$(CONFIG_GNSS) += gnss/ +obj-$(CONFIG_INTERCONNECT) += interconnect/ diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig new file mode 100644 index 000000000000..a261c7d41deb --- /dev/null +++ b/drivers/interconnect/Kconfig @@ -0,0 +1,10 @@ +menuconfig INTERCONNECT + tristate "On-Chip Interconnect management support" + help + Support for management of the on-chip interconnects. + + This framework is designed to provide a generic interface for + managing the interconnects in a SoC. + + If unsure, say no. + diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile new file mode 100644 index 000000000000..7a01f33b5593 --- /dev/null +++ b/drivers/interconnect/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +icc-core-objs := core.o + +obj-$(CONFIG_INTERCONNECT) += icc-core.o diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c new file mode 100644 index 000000000000..2b937b4f43c4 --- /dev/null +++ b/drivers/interconnect/core.c @@ -0,0 +1,567 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Interconnect framework core driver + * + * Copyright (c) 2017-2019, Linaro Ltd. + * Author: Georgi Djakov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static DEFINE_IDR(icc_idr); +static LIST_HEAD(icc_providers); +static DEFINE_MUTEX(icc_lock); + +/** + * struct icc_req - constraints that are attached to each node + * @req_node: entry in list of requests for the particular @node + * @node: the interconnect node to which this constraint applies + * @dev: reference to the device that sets the constraints + * @avg_bw: an integer describing the average bandwidth in kBps + * @peak_bw: an integer describing the peak bandwidth in kBps + */ +struct icc_req { + struct hlist_node req_node; + struct icc_node *node; + struct device *dev; + u32 avg_bw; + u32 peak_bw; +}; + +/** + * struct icc_path - interconnect path structure + * @num_nodes: number of hops (nodes) + * @reqs: array of the requests applicable to this path of nodes + */ +struct icc_path { + size_t num_nodes; + struct icc_req reqs[]; +}; + +static struct icc_node *node_find(const int id) +{ + return idr_find(&icc_idr, id); +} + +static struct icc_path *path_init(struct device *dev, struct icc_node *dst, + ssize_t num_nodes) +{ + struct icc_node *node = dst; + struct icc_path *path; + int i; + + path = kzalloc(struct_size(path, reqs, num_nodes), GFP_KERNEL); + if (!path) + return ERR_PTR(-ENOMEM); + + path->num_nodes = num_nodes; + + for (i = num_nodes - 1; i >= 0; i--) { + node->provider->users++; + hlist_add_head(&path->reqs[i].req_node, &node->req_list); + path->reqs[i].node = node; + path->reqs[i].dev = dev; + /* reference to previous node was saved during path traversal */ + node = node->reverse; + } + + return path; +} + +static struct icc_path *path_find(struct device *dev, struct icc_node *src, + struct icc_node *dst) +{ + struct icc_path *path = 
ERR_PTR(-EPROBE_DEFER); + struct icc_node *n, *node = NULL; + struct list_head traverse_list; + struct list_head edge_list; + struct list_head visited_list; + size_t i, depth = 1; + bool found = false; + + INIT_LIST_HEAD(&traverse_list); + INIT_LIST_HEAD(&edge_list); + INIT_LIST_HEAD(&visited_list); + + list_add(&src->search_list, &traverse_list); + src->reverse = NULL; + + do { + list_for_each_entry_safe(node, n, &traverse_list, search_list) { + if (node == dst) { + found = true; + list_splice_init(&edge_list, &visited_list); + list_splice_init(&traverse_list, &visited_list); + break; + } + for (i = 0; i < node->num_links; i++) { + struct icc_node *tmp = node->links[i]; + + if (!tmp) { + path = ERR_PTR(-ENOENT); + goto out; + } + + if (tmp->is_traversed) + continue; + + tmp->is_traversed = true; + tmp->reverse = node; + list_add_tail(&tmp->search_list, &edge_list); + } + } + + if (found) + break; + + list_splice_init(&traverse_list, &visited_list); + list_splice_init(&edge_list, &traverse_list); + + /* count the hops including the source */ + depth++; + + } while (!list_empty(&traverse_list)); + +out: + + /* reset the traversed state */ + list_for_each_entry_reverse(n, &visited_list, search_list) + n->is_traversed = false; + + if (found) + path = path_init(dev, dst, depth); + + return path; +} + +/* + * We want the path to honor all bandwidth requests, so the average and peak + * bandwidth requirements from each consumer are aggregated at each node. + * The aggregation is platform specific, so each platform can customize it by + * implementing its own aggregate() function. + */ + +static int aggregate_requests(struct icc_node *node) +{ + struct icc_provider *p = node->provider; + struct icc_req *r; + + node->avg_bw = 0; + node->peak_bw = 0; + + hlist_for_each_entry(r, &node->req_list, req_node) + p->aggregate(node, r->avg_bw, r->peak_bw, + &node->avg_bw, &node->peak_bw); + + return 0; +} + +static int apply_constraints(struct icc_path *path) +{ + struct icc_node *next, *prev = NULL; + int ret = -EINVAL; + int i; + + for (i = 0; i < path->num_nodes; i++) { + next = path->reqs[i].node; + + /* + * Both endpoints should be valid master-slave pairs of the + * same interconnect provider that will be configured. + */ + if (!prev || next->provider != prev->provider) { + prev = next; + continue; + } + + /* set the constraints */ + ret = next->provider->set(prev, next); + if (ret) + goto out; + + prev = next; + } +out: + return ret; +} + +/** + * icc_set_bw() - set bandwidth constraints on an interconnect path + * @path: reference to the path returned by icc_get() + * @avg_bw: average bandwidth in kilobytes per second + * @peak_bw: peak bandwidth in kilobytes per second + * + * This function is used by an interconnect consumer to express its own needs + * in terms of bandwidth for a previously requested path between two endpoints. + * The requests are aggregated and each node is updated accordingly. The entire + * path is locked by a mutex to ensure that the set() is completed. + * The @path can be NULL when the "interconnects" DT properties is missing, + * which will mean that no constraints will be set. + * + * Returns 0 on success, or an appropriate error code otherwise. 
+ */ +int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) +{ + struct icc_node *node; + size_t i; + int ret; + + if (!path) + return 0; + + mutex_lock(&icc_lock); + + for (i = 0; i < path->num_nodes; i++) { + node = path->reqs[i].node; + + /* update the consumer request for this path */ + path->reqs[i].avg_bw = avg_bw; + path->reqs[i].peak_bw = peak_bw; + + /* aggregate requests for this node */ + aggregate_requests(node); + } + + ret = apply_constraints(path); + if (ret) + pr_debug("interconnect: error applying constraints (%d)\n", + ret); + + mutex_unlock(&icc_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(icc_set_bw); + +/** + * icc_get() - return a handle for path between two endpoints + * @dev: the device requesting the path + * @src_id: source device port id + * @dst_id: destination device port id + * + * This function will search for a path between two endpoints and return an + * icc_path handle on success. Use icc_put() to release + * constraints when they are not needed anymore. + * If the interconnect API is disabled, NULL is returned and the consumer + * drivers will still build. Drivers are free to handle this specifically, + * but they don't have to. + * + * Return: icc_path pointer on success, ERR_PTR() on error or NULL if the + * interconnect API is disabled. + */ +struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id) +{ + struct icc_node *src, *dst; + struct icc_path *path = ERR_PTR(-EPROBE_DEFER); + + mutex_lock(&icc_lock); + + src = node_find(src_id); + if (!src) + goto out; + + dst = node_find(dst_id); + if (!dst) + goto out; + + path = path_find(dev, src, dst); + if (IS_ERR(path)) + dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path)); + +out: + mutex_unlock(&icc_lock); + return path; +} +EXPORT_SYMBOL_GPL(icc_get); + +/** + * icc_put() - release the reference to the icc_path + * @path: interconnect path + * + * Use this function to release the constraints on a path when the path is + * no longer needed. The constraints will be re-aggregated. 
+ */ +void icc_put(struct icc_path *path) +{ + struct icc_node *node; + size_t i; + int ret; + + if (!path || WARN_ON(IS_ERR(path))) + return; + + ret = icc_set_bw(path, 0, 0); + if (ret) + pr_err("%s: error (%d)\n", __func__, ret); + + mutex_lock(&icc_lock); + for (i = 0; i < path->num_nodes; i++) { + node = path->reqs[i].node; + hlist_del(&path->reqs[i].req_node); + if (!WARN_ON(!node->provider->users)) + node->provider->users--; + } + mutex_unlock(&icc_lock); + + kfree(path); +} +EXPORT_SYMBOL_GPL(icc_put); + +static struct icc_node *icc_node_create_nolock(int id) +{ + struct icc_node *node; + + /* check if node already exists */ + node = node_find(id); + if (node) + return node; + + node = kzalloc(sizeof(*node), GFP_KERNEL); + if (!node) + return ERR_PTR(-ENOMEM); + + id = idr_alloc(&icc_idr, node, id, id + 1, GFP_KERNEL); + if (id < 0) { + WARN(1, "%s: couldn't get idr\n", __func__); + kfree(node); + return ERR_PTR(id); + } + + node->id = id; + + return node; +} + +/** + * icc_node_create() - create a node + * @id: node id + * + * Return: icc_node pointer on success, or ERR_PTR() on error + */ +struct icc_node *icc_node_create(int id) +{ + struct icc_node *node; + + mutex_lock(&icc_lock); + + node = icc_node_create_nolock(id); + + mutex_unlock(&icc_lock); + + return node; +} +EXPORT_SYMBOL_GPL(icc_node_create); + +/** + * icc_node_destroy() - destroy a node + * @id: node id + */ +void icc_node_destroy(int id) +{ + struct icc_node *node; + + mutex_lock(&icc_lock); + + node = node_find(id); + if (node) { + idr_remove(&icc_idr, node->id); + WARN_ON(!hlist_empty(&node->req_list)); + } + + mutex_unlock(&icc_lock); + + kfree(node); +} +EXPORT_SYMBOL_GPL(icc_node_destroy); + +/** + * icc_link_create() - create a link between two nodes + * @node: source node id + * @dst_id: destination node id + * + * Create a link between two nodes. The nodes might belong to different + * interconnect providers and the @dst_id node might not exist (if the + * provider driver has not probed yet). So just create the @dst_id node + * and when the actual provider driver is probed, the rest of the node + * data is filled. 
+ * + * Return: 0 on success, or an error code otherwise + */ +int icc_link_create(struct icc_node *node, const int dst_id) +{ + struct icc_node *dst; + struct icc_node **new; + int ret = 0; + + if (!node->provider) + return -EINVAL; + + mutex_lock(&icc_lock); + + dst = node_find(dst_id); + if (!dst) { + dst = icc_node_create_nolock(dst_id); + + if (IS_ERR(dst)) { + ret = PTR_ERR(dst); + goto out; + } + } + + new = krealloc(node->links, + (node->num_links + 1) * sizeof(*node->links), + GFP_KERNEL); + if (!new) { + ret = -ENOMEM; + goto out; + } + + node->links = new; + node->links[node->num_links++] = dst; + +out: + mutex_unlock(&icc_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(icc_link_create); + +/** + * icc_link_destroy() - destroy a link between two nodes + * @src: pointer to source node + * @dst: pointer to destination node + * + * Return: 0 on success, or an error code otherwise + */ +int icc_link_destroy(struct icc_node *src, struct icc_node *dst) +{ + struct icc_node **new; + size_t slot; + int ret = 0; + + if (IS_ERR_OR_NULL(src)) + return -EINVAL; + + if (IS_ERR_OR_NULL(dst)) + return -EINVAL; + + mutex_lock(&icc_lock); + + for (slot = 0; slot < src->num_links; slot++) + if (src->links[slot] == dst) + break; + + if (WARN_ON(slot == src->num_links)) { + ret = -ENXIO; + goto out; + } + + src->links[slot] = src->links[--src->num_links]; + + new = krealloc(src->links, src->num_links * sizeof(*src->links), + GFP_KERNEL); + if (new) + src->links = new; + +out: + mutex_unlock(&icc_lock); + + return ret; +} +EXPORT_SYMBOL_GPL(icc_link_destroy); + +/** + * icc_node_add() - add interconnect node to interconnect provider + * @node: pointer to the interconnect node + * @provider: pointer to the interconnect provider + */ +void icc_node_add(struct icc_node *node, struct icc_provider *provider) +{ + mutex_lock(&icc_lock); + + node->provider = provider; + list_add_tail(&node->node_list, &provider->nodes); + + mutex_unlock(&icc_lock); +} +EXPORT_SYMBOL_GPL(icc_node_add); + +/** + * icc_node_del() - delete interconnect node from interconnect provider + * @node: pointer to the interconnect node + */ +void icc_node_del(struct icc_node *node) +{ + mutex_lock(&icc_lock); + + list_del(&node->node_list); + + mutex_unlock(&icc_lock); +} +EXPORT_SYMBOL_GPL(icc_node_del); + +/** + * icc_provider_add() - add a new interconnect provider + * @provider: the interconnect provider that will be added into topology + * + * Return: 0 on success, or an error code otherwise + */ +int icc_provider_add(struct icc_provider *provider) +{ + if (WARN_ON(!provider->set)) + return -EINVAL; + + mutex_lock(&icc_lock); + + INIT_LIST_HEAD(&provider->nodes); + list_add_tail(&provider->provider_list, &icc_providers); + + mutex_unlock(&icc_lock); + + dev_dbg(provider->dev, "interconnect provider added to topology\n"); + + return 0; +} +EXPORT_SYMBOL_GPL(icc_provider_add); + +/** + * icc_provider_del() - delete previously added interconnect provider + * @provider: the interconnect provider that will be removed from topology + * + * Return: 0 on success, or an error code otherwise + */ +int icc_provider_del(struct icc_provider *provider) +{ + mutex_lock(&icc_lock); + if (provider->users) { + pr_warn("interconnect provider still has %d users\n", + provider->users); + mutex_unlock(&icc_lock); + return -EBUSY; + } + + if (!list_empty(&provider->nodes)) { + pr_warn("interconnect provider still has nodes\n"); + mutex_unlock(&icc_lock); + return -EBUSY; + } + + list_del(&provider->provider_list); + mutex_unlock(&icc_lock); + + return 0; 
+}
+EXPORT_SYMBOL_GPL(icc_provider_del);
+
+MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>");
+MODULE_DESCRIPTION("Interconnect Driver Core");
+MODULE_LICENSE("GPL v2");
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
new file mode 100644
index 000000000000..78208a754181
--- /dev/null
+++ b/include/linux/interconnect-provider.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, Linaro Ltd.
+ * Author: Georgi Djakov
+ */
+
+#ifndef __LINUX_INTERCONNECT_PROVIDER_H
+#define __LINUX_INTERCONNECT_PROVIDER_H
+
+#include <linux/interconnect.h>
+
+#define icc_units_to_bps(bw)	((bw) * 1000ULL)
+
+struct icc_node;
+
+/**
+ * struct icc_provider - interconnect provider (controller) entity that might
+ * provide multiple interconnect controls
+ *
+ * @provider_list: list of the registered interconnect providers
+ * @nodes: internal list of the interconnect provider nodes
+ * @set: pointer to device specific set operation function
+ * @aggregate: pointer to device specific aggregate operation function
+ * @dev: the device this interconnect provider belongs to
+ * @users: count of active users
+ * @data: pointer to private data
+ */
+struct icc_provider {
+	struct list_head	provider_list;
+	struct list_head	nodes;
+	int (*set)(struct icc_node *src, struct icc_node *dst);
+	int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw,
+			 u32 *agg_avg, u32 *agg_peak);
+	struct device		*dev;
+	int			users;
+	void			*data;
+};
+
+/**
+ * struct icc_node - entity that is part of the interconnect topology
+ *
+ * @id: platform specific node id
+ * @name: node name used in debugfs
+ * @links: a list of targets pointing to where we can go next when traversing
+ * @num_links: number of links to other interconnect nodes
+ * @provider: points to the interconnect provider of this node
+ * @node_list: the list entry in the parent provider's "nodes" list
+ * @search_list: list used when walking the nodes graph
+ * @reverse: pointer to previous node when walking the nodes graph
+ * @is_traversed: flag that is used when walking the nodes graph
+ * @req_list: a list of QoS constraint requests associated with this node
+ * @avg_bw: aggregated value of average bandwidth requests from all consumers
+ * @peak_bw: aggregated value of peak bandwidth requests from all consumers
+ * @data: pointer to private data
+ */
+struct icc_node {
+	int			id;
+	const char		*name;
+	struct icc_node		**links;
+	size_t			num_links;
+
+	struct icc_provider	*provider;
+	struct list_head	node_list;
+	struct list_head	search_list;
+	struct icc_node		*reverse;
+	u8			is_traversed:1;
+	struct hlist_head	req_list;
+	u32			avg_bw;
+	u32			peak_bw;
+	void			*data;
+};
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_node *icc_node_create(int id);
+void icc_node_destroy(int id);
+int icc_link_create(struct icc_node *node, const int dst_id);
+int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
+void icc_node_add(struct icc_node *node, struct icc_provider *provider);
+void icc_node_del(struct icc_node *node);
+int icc_provider_add(struct icc_provider *provider);
+int icc_provider_del(struct icc_provider *provider);
+
+#else
+
+static inline struct icc_node *icc_node_create(int id)
+{
+	return ERR_PTR(-ENOTSUPP);
+}
+
+static inline void icc_node_destroy(int id)
+{
+}
+
+static inline int icc_link_create(struct icc_node *node, const int dst_id)
+{
+	return -ENOTSUPP;
+}
+
+static inline int icc_link_destroy(struct icc_node *src, struct icc_node *dst)
+{
+	return -ENOTSUPP;
+}
+
+static inline void icc_node_add(struct icc_node *node,
+				struct icc_provider *provider)
+{
+}
+
+static inline void icc_node_del(struct icc_node *node)
+{
+}
+
+static inline int icc_provider_add(struct icc_provider *provider)
+{
+	return -ENOTSUPP;
+}
+
+static inline int icc_provider_del(struct icc_provider *provider)
+{
+	return -ENOTSUPP;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_PROVIDER_H */
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h
new file mode 100644
index 000000000000..c331afb3a2c8
--- /dev/null
+++ b/include/linux/interconnect.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018-2019, Linaro Ltd.
+ * Author: Georgi Djakov
+ */
+
+#ifndef __LINUX_INTERCONNECT_H
+#define __LINUX_INTERCONNECT_H
+
+#include <linux/mutex.h>
+#include <linux/types.h>
+
+/* macros for converting to icc units */
+#define Bps_to_icc(x)	((x) / 1000)
+#define kBps_to_icc(x)	(x)
+#define MBps_to_icc(x)	((x) * 1000)
+#define GBps_to_icc(x)	((x) * 1000 * 1000)
+#define bps_to_icc(x)	(1)
+#define kbps_to_icc(x)	((x) / 8 + ((x) % 8 ? 1 : 0))
+#define Mbps_to_icc(x)	((x) * 1000 / 8)
+#define Gbps_to_icc(x)	((x) * 1000 * 1000 / 8)
+
+struct icc_path;
+struct device;
+
+#if IS_ENABLED(CONFIG_INTERCONNECT)
+
+struct icc_path *icc_get(struct device *dev, const int src_id,
+			 const int dst_id);
+void icc_put(struct icc_path *path);
+int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw);
+
+#else
+
+static inline struct icc_path *icc_get(struct device *dev, const int src_id,
+				       const int dst_id)
+{
+	return NULL;
+}
+
+static inline void icc_put(struct icc_path *path)
+{
+}
+
+static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw)
+{
+	return 0;
+}
+
+#endif /* CONFIG_INTERCONNECT */
+
+#endif /* __LINUX_INTERCONNECT_H */
-- cgit v1.2.3-71-gd317

From 87e3031b6fbd83ea83adf1bf9602bcce313ee787 Mon Sep 17 00:00:00 2001
From: Georgi Djakov
Date: Wed, 16 Jan 2019 18:10:58 +0200
Subject: interconnect: Allow endpoints translation via DT

Currently we support only platform data for specifying the interconnect
endpoints. As the endpoints are now hard-coded into the consumer drivers,
this may lead to complications when a single driver is used by multiple
SoCs, which may have different interconnect topology.
To avoid cluttering the consumer drivers, introduce a translation function
to help us get the board specific interconnect data from device-tree.

Reviewed-by: Evan Green
Signed-off-by: Georgi Djakov
Signed-off-by: Greg Kroah-Hartman
---
 drivers/interconnect/core.c           | 149 ++++++++++++++++++++++++++++++++++
 include/linux/interconnect-provider.h |  17 ++++
 include/linux/interconnect.h          |   7 ++
 3 files changed, 173 insertions(+)

(limited to 'include')

diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 2b937b4f43c4..a8c2bd35197f 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include <linux/of.h>
 #include

 static DEFINE_IDR(icc_idr);
@@ -194,6 +195,152 @@ out:
 	return ret;
 }

+/**
+ * of_icc_xlate_onecell() - Translate function using a single index.
+ * @spec: OF phandle args to map into an interconnect node.
+ * @data: private data (pointer to struct icc_onecell_data)
+ *
+ * This is a generic translate function that can be used to model simple
+ * interconnect providers that have one device tree node and provide
+ * multiple interconnect nodes. A single cell is used as an index into
+ * an array of icc nodes specified in the icc_onecell_data struct when
+ * registering the provider.
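+ *
+ * As a minimal sketch (the onecell_data variable is hypothetical), a
+ * provider driver would typically hook this up before icc_provider_add():
+ *
+ *	provider->xlate = of_icc_xlate_onecell;
+ *	provider->data = onecell_data;	(a struct icc_onecell_data pointer)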
+ */
+struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
+				      void *data)
+{
+	struct icc_onecell_data *icc_data = data;
+	unsigned int idx = spec->args[0];
+
+	if (idx >= icc_data->num_nodes) {
+		pr_err("%s: invalid index %u\n", __func__, idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	return icc_data->nodes[idx];
+}
+EXPORT_SYMBOL_GPL(of_icc_xlate_onecell);
+
+/**
+ * of_icc_get_from_provider() - Look-up interconnect node
+ * @spec: OF phandle args to use for look-up
+ *
+ * Looks for an interconnect provider under the node specified by @spec and,
+ * if found, uses the provider's xlate function to map the phandle args to an
+ * interconnect node.
+ *
+ * Returns a valid pointer to struct icc_node on success or ERR_PTR()
+ * on failure.
+ */
+static struct icc_node *of_icc_get_from_provider(struct of_phandle_args *spec)
+{
+	struct icc_node *node = ERR_PTR(-EPROBE_DEFER);
+	struct icc_provider *provider;
+
+	if (!spec || spec->args_count != 1)
+		return ERR_PTR(-EINVAL);
+
+	mutex_lock(&icc_lock);
+	list_for_each_entry(provider, &icc_providers, provider_list) {
+		if (provider->dev->of_node == spec->np)
+			node = provider->xlate(spec, provider->data);
+		if (!IS_ERR(node))
+			break;
+	}
+	mutex_unlock(&icc_lock);
+
+	return node;
+}
+
+/**
+ * of_icc_get() - get a path handle from a DT node based on name
+ * @dev: device pointer for the consumer device
+ * @name: interconnect path name
+ *
+ * This function will search for a path between two endpoints and return an
+ * icc_path handle on success. Use icc_put() to release constraints when they
+ * are not needed anymore.
+ * If the interconnect API is disabled, NULL is returned and the consumer
+ * drivers will still build. Drivers are free to handle this specifically,
+ * but they don't have to.
+ *
+ * Return: icc_path pointer on success or ERR_PTR() on error. NULL is returned
+ * when the API is disabled or the "interconnects" DT property is missing.
+ */
+struct icc_path *of_icc_get(struct device *dev, const char *name)
+{
+	struct icc_path *path = ERR_PTR(-EPROBE_DEFER);
+	struct icc_node *src_node, *dst_node;
+	struct device_node *np = NULL;
+	struct of_phandle_args src_args, dst_args;
+	int idx = 0;
+	int ret;
+
+	if (!dev || !dev->of_node)
+		return ERR_PTR(-ENODEV);
+
+	np = dev->of_node;
+
+	/*
+	 * When the consumer DT node does not have an "interconnects" property,
+	 * return a NULL path to skip setting constraints.
+	 */
+	if (!of_find_property(np, "interconnects", NULL))
+		return NULL;
+
+	/*
+	 * We use a combination of phandle and specifier for endpoint. For now
+	 * let's support only global ids and extend this in the future if
+	 * needed without breaking DT compatibility.
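+	 *
+	 * Illustrative only (the phandle and cell names are hypothetical):
+	 * a consumer node would then carry properties along the lines of
+	 *
+	 *	interconnects = <&noc MASTER_X &noc SLAVE_Y>;
+	 *	interconnect-names = "mem";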
+ */ + if (name) { + idx = of_property_match_string(np, "interconnect-names", name); + if (idx < 0) + return ERR_PTR(idx); + } + + ret = of_parse_phandle_with_args(np, "interconnects", + "#interconnect-cells", idx * 2, + &src_args); + if (ret) + return ERR_PTR(ret); + + of_node_put(src_args.np); + + ret = of_parse_phandle_with_args(np, "interconnects", + "#interconnect-cells", idx * 2 + 1, + &dst_args); + if (ret) + return ERR_PTR(ret); + + of_node_put(dst_args.np); + + src_node = of_icc_get_from_provider(&src_args); + + if (IS_ERR(src_node)) { + if (PTR_ERR(src_node) != -EPROBE_DEFER) + dev_err(dev, "error finding src node: %ld\n", + PTR_ERR(src_node)); + return ERR_CAST(src_node); + } + + dst_node = of_icc_get_from_provider(&dst_args); + + if (IS_ERR(dst_node)) { + if (PTR_ERR(dst_node) != -EPROBE_DEFER) + dev_err(dev, "error finding dst node: %ld\n", + PTR_ERR(dst_node)); + return ERR_CAST(dst_node); + } + + mutex_lock(&icc_lock); + path = path_find(dev, src_node, dst_node); + if (IS_ERR(path)) + dev_err(dev, "%s: invalid path=%ld\n", __func__, PTR_ERR(path)); + mutex_unlock(&icc_lock); + + return path; +} +EXPORT_SYMBOL_GPL(of_icc_get); + /** * icc_set_bw() - set bandwidth constraints on an interconnect path * @path: reference to the path returned by icc_get() @@ -519,6 +666,8 @@ int icc_provider_add(struct icc_provider *provider) { if (WARN_ON(!provider->set)) return -EINVAL; + if (WARN_ON(!provider->xlate)) + return -EINVAL; mutex_lock(&icc_lock); diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h index 78208a754181..63caccadc2db 100644 --- a/include/linux/interconnect-provider.h +++ b/include/linux/interconnect-provider.h @@ -12,6 +12,21 @@ #define icc_units_to_bps(bw) ((bw) * 1000ULL) struct icc_node; +struct of_phandle_args; + +/** + * struct icc_onecell_data - driver data for onecell interconnect providers + * + * @num_nodes: number of nodes in this device + * @nodes: array of pointers to the nodes in this device + */ +struct icc_onecell_data { + unsigned int num_nodes; + struct icc_node *nodes[]; +}; + +struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, + void *data); /** * struct icc_provider - interconnect provider (controller) entity that might @@ -21,6 +36,7 @@ struct icc_node; * @nodes: internal list of the interconnect provider nodes * @set: pointer to device specific set operation function * @aggregate: pointer to device specific aggregate operation function + * @xlate: provider-specific callback for mapping nodes from phandle arguments * @dev: the device this interconnect provider belongs to * @users: count of active users * @data: pointer to private data @@ -31,6 +47,7 @@ struct icc_provider { int (*set)(struct icc_node *src, struct icc_node *dst); int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, u32 *agg_avg, u32 *agg_peak); + struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); struct device *dev; int users; void *data; diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h index c331afb3a2c8..dc25864755ba 100644 --- a/include/linux/interconnect.h +++ b/include/linux/interconnect.h @@ -27,6 +27,7 @@ struct device; struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id); +struct icc_path *of_icc_get(struct device *dev, const char *name); void icc_put(struct icc_path *path); int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); @@ -38,6 +39,12 @@ static inline struct icc_path *icc_get(struct device *dev, const int src_id, return 
NULL; } +static inline struct icc_path *of_icc_get(struct device *dev, + const char *name) +{ + return NULL; +} + static inline void icc_put(struct icc_path *path) { } -- cgit v1.2.3-71-gd317 From b5d2f741077abc71205e60b5bbd7dfa07b9d6953 Mon Sep 17 00:00:00 2001 From: David Dai Date: Wed, 16 Jan 2019 18:11:00 +0200 Subject: interconnect: qcom: Add sdm845 interconnect provider driver Introduce Qualcomm SDM845 specific provider driver using the interconnect framework. Signed-off-by: David Dai Acked-by: Rob Herring Signed-off-by: Georgi Djakov Signed-off-by: Greg Kroah-Hartman --- .../bindings/interconnect/qcom,sdm845.txt | 24 + drivers/interconnect/Kconfig | 5 + drivers/interconnect/Makefile | 1 + drivers/interconnect/qcom/Kconfig | 13 + drivers/interconnect/qcom/Makefile | 5 + drivers/interconnect/qcom/sdm845.c | 838 +++++++++++++++++++++ include/dt-bindings/interconnect/qcom,sdm845.h | 143 ++++ 7 files changed, 1029 insertions(+) create mode 100644 Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt create mode 100644 drivers/interconnect/qcom/Kconfig create mode 100644 drivers/interconnect/qcom/Makefile create mode 100644 drivers/interconnect/qcom/sdm845.c create mode 100644 include/dt-bindings/interconnect/qcom,sdm845.h (limited to 'include') diff --git a/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt new file mode 100644 index 000000000000..5c4f1d911630 --- /dev/null +++ b/Documentation/devicetree/bindings/interconnect/qcom,sdm845.txt @@ -0,0 +1,24 @@ +Qualcomm SDM845 Network-On-Chip interconnect driver binding +----------------------------------------------------------- + +SDM845 interconnect providers support system bandwidth requirements through +RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is +able to communicate with the BCM through the Resource State Coordinator (RSC) +associated with each execution environment. Provider nodes must reside within +an RPMh device node pertaining to their RSC and each provider maps to a single +RPMh resource. + +Required properties : +- compatible : shall contain only one of the following: + "qcom,sdm845-rsc-hlos" +- #interconnect-cells : should contain 1 + +Examples: + +apps_rsc: rsc { + rsc_hlos: interconnect { + compatible = "qcom,sdm845-rsc-hlos"; + #interconnect-cells = <1>; + }; +}; + diff --git a/drivers/interconnect/Kconfig b/drivers/interconnect/Kconfig index a261c7d41deb..07a8276fa35a 100644 --- a/drivers/interconnect/Kconfig +++ b/drivers/interconnect/Kconfig @@ -8,3 +8,8 @@ menuconfig INTERCONNECT If unsure, say no. +if INTERCONNECT + +source "drivers/interconnect/qcom/Kconfig" + +endif diff --git a/drivers/interconnect/Makefile b/drivers/interconnect/Makefile index 7a01f33b5593..28f2ab0824d5 100644 --- a/drivers/interconnect/Makefile +++ b/drivers/interconnect/Makefile @@ -3,3 +3,4 @@ icc-core-objs := core.o obj-$(CONFIG_INTERCONNECT) += icc-core.o +obj-$(CONFIG_INTERCONNECT_QCOM) += qcom/ diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig new file mode 100644 index 000000000000..290d330abe5a --- /dev/null +++ b/drivers/interconnect/qcom/Kconfig @@ -0,0 +1,13 @@ +config INTERCONNECT_QCOM + bool "Qualcomm Network-on-Chip interconnect drivers" + depends on ARCH_QCOM + help + Support for Qualcomm's Network-on-Chip interconnect hardware. 
+ +config INTERCONNECT_QCOM_SDM845 + tristate "Qualcomm SDM845 interconnect driver" + depends on INTERCONNECT_QCOM + depends on (QCOM_RPMH && QCOM_COMMAND_DB && OF) || COMPILE_TEST + help + This is a driver for the Qualcomm Network-on-Chip on sdm845-based + platforms. diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile new file mode 100644 index 000000000000..1c1cea690f92 --- /dev/null +++ b/drivers/interconnect/qcom/Makefile @@ -0,0 +1,5 @@ +# SPDX-License-Identifier: GPL-2.0 + +qnoc-sdm845-objs := sdm845.o + +obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o diff --git a/drivers/interconnect/qcom/sdm845.c b/drivers/interconnect/qcom/sdm845.c new file mode 100644 index 000000000000..4915b78da673 --- /dev/null +++ b/drivers/interconnect/qcom/sdm845.c @@ -0,0 +1,838 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018, The Linux Foundation. All rights reserved. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#define BCM_TCS_CMD_COMMIT_SHFT 30 +#define BCM_TCS_CMD_COMMIT_MASK 0x40000000 +#define BCM_TCS_CMD_VALID_SHFT 29 +#define BCM_TCS_CMD_VALID_MASK 0x20000000 +#define BCM_TCS_CMD_VOTE_X_SHFT 14 +#define BCM_TCS_CMD_VOTE_MASK 0x3fff +#define BCM_TCS_CMD_VOTE_Y_SHFT 0 +#define BCM_TCS_CMD_VOTE_Y_MASK 0xfffc000 + +#define BCM_TCS_CMD(commit, valid, vote_x, vote_y) \ + (((commit) << BCM_TCS_CMD_COMMIT_SHFT) | \ + ((valid) << BCM_TCS_CMD_VALID_SHFT) | \ + ((cpu_to_le32(vote_x) & \ + BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_X_SHFT) | \ + ((cpu_to_le32(vote_y) & \ + BCM_TCS_CMD_VOTE_MASK) << BCM_TCS_CMD_VOTE_Y_SHFT)) + +#define to_qcom_provider(_provider) \ + container_of(_provider, struct qcom_icc_provider, provider) + +struct qcom_icc_provider { + struct icc_provider provider; + struct device *dev; + struct qcom_icc_bcm **bcms; + size_t num_bcms; +}; + +/** + * struct bcm_db - Auxiliary data pertaining to each Bus Clock Manager (BCM) + * @unit: divisor used to convert bytes/sec bw value to an RPMh msg + * @width: multiplier used to convert bytes/sec bw value to an RPMh msg + * @vcd: virtual clock domain that this bcm belongs to + * @reserved: reserved field + */ +struct bcm_db { + __le32 unit; + __le16 width; + u8 vcd; + u8 reserved; +}; + +#define SDM845_MAX_LINKS 43 +#define SDM845_MAX_BCMS 30 +#define SDM845_MAX_BCM_PER_NODE 2 +#define SDM845_MAX_VCD 10 + +/** + * struct qcom_icc_node - Qualcomm specific interconnect nodes + * @name: the node name used in debugfs + * @links: an array of nodes where we can go next while traversing + * @id: a unique node identifier + * @num_links: the total number of @links + * @channels: num of channels at this node + * @buswidth: width of the interconnect between a node and the bus + * @sum_avg: current sum aggregate value of all avg bw requests + * @max_peak: current max aggregate value of all peak bw requests + * @bcms: list of bcms associated with this logical node + * @num_bcms: num of @bcms + */ +struct qcom_icc_node { + const char *name; + u16 links[SDM845_MAX_LINKS]; + u16 id; + u16 num_links; + u16 channels; + u16 buswidth; + u64 sum_avg; + u64 max_peak; + struct qcom_icc_bcm *bcms[SDM845_MAX_BCM_PER_NODE]; + size_t num_bcms; +}; + +/** + * struct qcom_icc_bcm - Qualcomm specific hardware accelerator nodes + * known as Bus Clock Manager (BCM) + * @name: the bcm node name used to fetch BCM data from command db + * @type: latency or bandwidth bcm + * @addr: address offsets used when voting 
to RPMH + * @vote_x: aggregated threshold values, represents sum_bw when @type is bw bcm + * @vote_y: aggregated threshold values, represents peak_bw when @type is bw bcm + * @dirty: flag used to indicate whether the bcm needs to be committed + * @keepalive: flag used to indicate whether a keepalive is required + * @aux_data: auxiliary data used when calculating threshold values and + * communicating with RPMh + * @list: used to link to other bcms when compiling lists for commit + * @num_nodes: total number of @num_nodes + * @nodes: list of qcom_icc_nodes that this BCM encapsulates + */ +struct qcom_icc_bcm { + const char *name; + u32 type; + u32 addr; + u64 vote_x; + u64 vote_y; + bool dirty; + bool keepalive; + struct bcm_db aux_data; + struct list_head list; + size_t num_nodes; + struct qcom_icc_node *nodes[]; +}; + +struct qcom_icc_fabric { + struct qcom_icc_node **nodes; + size_t num_nodes; +}; + +struct qcom_icc_desc { + struct qcom_icc_node **nodes; + size_t num_nodes; + struct qcom_icc_bcm **bcms; + size_t num_bcms; +}; + +#define DEFINE_QNODE(_name, _id, _channels, _buswidth, \ + _numlinks, ...) \ + static struct qcom_icc_node _name = { \ + .id = _id, \ + .name = #_name, \ + .channels = _channels, \ + .buswidth = _buswidth, \ + .num_links = _numlinks, \ + .links = { __VA_ARGS__ }, \ + } + +DEFINE_QNODE(qhm_a1noc_cfg, MASTER_A1NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A1NOC); +DEFINE_QNODE(qhm_qup1, MASTER_BLSP_1, 1, 4, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(qhm_tsif, MASTER_TSIF, 1, 4, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_sdc2, MASTER_SDCC_2, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_sdc4, MASTER_SDCC_4, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_ufs_card, MASTER_UFS_CARD, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_ufs_mem, MASTER_UFS_MEM, 1, 8, 1, SLAVE_A1NOC_SNOC); +DEFINE_QNODE(xm_pcie_0, MASTER_PCIE_0, 1, 8, 1, SLAVE_ANOC_PCIE_A1NOC_SNOC); +DEFINE_QNODE(qhm_a2noc_cfg, MASTER_A2NOC_CFG, 1, 4, 1, SLAVE_SERVICE_A2NOC); +DEFINE_QNODE(qhm_qdss_bam, MASTER_QDSS_BAM, 1, 4, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qhm_qup2, MASTER_BLSP_2, 1, 4, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qnm_cnoc, MASTER_CNOC_A2NOC, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_crypto, MASTER_CRYPTO, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_ipa, MASTER_IPA, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(xm_pcie3_1, MASTER_PCIE_1, 1, 8, 1, SLAVE_ANOC_PCIE_SNOC); +DEFINE_QNODE(xm_qdss_etr, MASTER_QDSS_ETR, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(xm_usb3_0, MASTER_USB3_0, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(xm_usb3_1, MASTER_USB3_1, 1, 8, 1, SLAVE_A2NOC_SNOC); +DEFINE_QNODE(qxm_camnoc_hf0_uncomp, MASTER_CAMNOC_HF0_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qxm_camnoc_hf1_uncomp, MASTER_CAMNOC_HF1_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qxm_camnoc_sf_uncomp, MASTER_CAMNOC_SF_UNCOMP, 1, 32, 1, SLAVE_CAMNOC_UNCOMP); +DEFINE_QNODE(qhm_spdm, MASTER_SPDM, 1, 4, 1, SLAVE_CNOC_A2NOC); +DEFINE_QNODE(qhm_tic, MASTER_TIC, 1, 4, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, 
SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(qnm_snoc, MASTER_SNOC_CNOC, 1, 8, 42, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(xm_qdss_dap, MASTER_QDSS_DAP, 1, 8, 43, SLAVE_A1NOC_CFG, SLAVE_A2NOC_CFG, SLAVE_AOP, SLAVE_AOSS, SLAVE_CAMERA_CFG, SLAVE_CLK_CTL, SLAVE_CDSP_CFG, SLAVE_RBCPR_CX_CFG, SLAVE_CRYPTO_0_CFG, SLAVE_DCC_CFG, SLAVE_CNOC_DDRSS, SLAVE_DISPLAY_CFG, SLAVE_GLM, SLAVE_GFX3D_CFG, SLAVE_IMEM_CFG, SLAVE_IPA_CFG, SLAVE_CNOC_MNOC_CFG, SLAVE_PCIE_0_CFG, SLAVE_PCIE_1_CFG, SLAVE_PDM, SLAVE_SOUTH_PHY_CFG, SLAVE_PIMEM_CFG, SLAVE_PRNG, SLAVE_QDSS_CFG, SLAVE_BLSP_2, SLAVE_BLSP_1, SLAVE_SDCC_2, SLAVE_SDCC_4, SLAVE_SNOC_CFG, SLAVE_SPDM_WRAPPER, SLAVE_SPSS_CFG, SLAVE_TCSR, SLAVE_TLMM_NORTH, SLAVE_TLMM_SOUTH, SLAVE_TSIF, SLAVE_UFS_CARD_CFG, SLAVE_UFS_MEM_CFG, SLAVE_USB3_0, SLAVE_USB3_1, SLAVE_VENUS_CFG, SLAVE_VSENSE_CTRL_CFG, SLAVE_CNOC_A2NOC, SLAVE_SERVICE_CNOC); +DEFINE_QNODE(qhm_cnoc, MASTER_CNOC_DC_NOC, 1, 4, 2, SLAVE_LLCC_CFG, SLAVE_MEM_NOC_CFG); +DEFINE_QNODE(acm_l3, MASTER_APPSS_PROC, 1, 16, 3, SLAVE_GNOC_SNOC, SLAVE_GNOC_MEM_NOC, SLAVE_SERVICE_GNOC); +DEFINE_QNODE(pm_gnoc_cfg, MASTER_GNOC_CFG, 1, 4, 1, SLAVE_SERVICE_GNOC); +DEFINE_QNODE(llcc_mc, MASTER_LLCC, 4, 4, 1, SLAVE_EBI1); +DEFINE_QNODE(acm_tcu, MASTER_TCU_0, 1, 8, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qhm_memnoc_cfg, MASTER_MEM_NOC_CFG, 1, 4, 2, SLAVE_MSS_PROC_MS_MPU_CFG, SLAVE_SERVICE_MEM_NOC); +DEFINE_QNODE(qnm_apps, MASTER_GNOC_MEM_NOC, 2, 32, 1, SLAVE_LLCC); +DEFINE_QNODE(qnm_mnoc_hf, MASTER_MNOC_HF_MEM_NOC, 2, 32, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); +DEFINE_QNODE(qnm_mnoc_sf, MASTER_MNOC_SF_MEM_NOC, 1, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qnm_snoc_gc, MASTER_SNOC_GC_MEM_NOC, 1, 8, 1, SLAVE_LLCC); +DEFINE_QNODE(qnm_snoc_sf, MASTER_SNOC_SF_MEM_NOC, 1, 16, 2, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC); +DEFINE_QNODE(qxm_gpu, MASTER_GFX3D, 2, 32, 3, SLAVE_MEM_NOC_GNOC, SLAVE_LLCC, SLAVE_MEM_NOC_SNOC); +DEFINE_QNODE(qhm_mnoc_cfg, MASTER_CNOC_MNOC_CFG, 1, 4, 1, SLAVE_SERVICE_MNOC); +DEFINE_QNODE(qxm_camnoc_hf0, MASTER_CAMNOC_HF0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_camnoc_hf1, MASTER_CAMNOC_HF1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_camnoc_sf, MASTER_CAMNOC_SF, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_mdp0, MASTER_MDP0, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_mdp1, MASTER_MDP1, 1, 32, 1, SLAVE_MNOC_HF_MEM_NOC); +DEFINE_QNODE(qxm_rot, MASTER_ROTATOR, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus0, MASTER_VIDEO_P0, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus1, MASTER_VIDEO_P1, 1, 32, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qxm_venus_arm9, MASTER_VIDEO_PROC, 1, 8, 1, SLAVE_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qhm_snoc_cfg, MASTER_SNOC_CFG, 1, 4, 1, 
SLAVE_SERVICE_SNOC); +DEFINE_QNODE(qnm_aggre1_noc, MASTER_A1NOC_SNOC, 1, 16, 6, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qnm_aggre2_noc, MASTER_A2NOC_SNOC, 1, 16, 9, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); +DEFINE_QNODE(qnm_gladiator_sodv, MASTER_GNOC_SNOC, 1, 8, 8, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PCIE_0, SLAVE_PCIE_1, SLAVE_PIMEM, SLAVE_QDSS_STM, SLAVE_TCU); +DEFINE_QNODE(qnm_memnoc, MASTER_MEM_NOC_SNOC, 1, 8, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_IMEM, SLAVE_PIMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qnm_pcie_anoc, MASTER_ANOC_PCIE_SNOC, 1, 16, 5, SLAVE_APPSS, SLAVE_SNOC_CNOC, SLAVE_SNOC_MEM_NOC_SF, SLAVE_IMEM, SLAVE_QDSS_STM); +DEFINE_QNODE(qxm_pimem, MASTER_PIMEM, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); +DEFINE_QNODE(xm_gic, MASTER_GIC, 1, 8, 2, SLAVE_SNOC_MEM_NOC_GC, SLAVE_IMEM); +DEFINE_QNODE(qns_a1noc_snoc, SLAVE_A1NOC_SNOC, 1, 16, 1, MASTER_A1NOC_SNOC); +DEFINE_QNODE(srvc_aggre1_noc, SLAVE_SERVICE_A1NOC, 1, 4, 0); +DEFINE_QNODE(qns_pcie_a1noc_snoc, SLAVE_ANOC_PCIE_A1NOC_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); +DEFINE_QNODE(qns_a2noc_snoc, SLAVE_A2NOC_SNOC, 1, 16, 1, MASTER_A2NOC_SNOC); +DEFINE_QNODE(qns_pcie_snoc, SLAVE_ANOC_PCIE_SNOC, 1, 16, 1, MASTER_ANOC_PCIE_SNOC); +DEFINE_QNODE(srvc_aggre2_noc, SLAVE_SERVICE_A2NOC, 1, 4, 0); +DEFINE_QNODE(qns_camnoc_uncomp, SLAVE_CAMNOC_UNCOMP, 1, 32, 0); +DEFINE_QNODE(qhs_a1_noc_cfg, SLAVE_A1NOC_CFG, 1, 4, 1, MASTER_A1NOC_CFG); +DEFINE_QNODE(qhs_a2_noc_cfg, SLAVE_A2NOC_CFG, 1, 4, 1, MASTER_A2NOC_CFG); +DEFINE_QNODE(qhs_aop, SLAVE_AOP, 1, 4, 0); +DEFINE_QNODE(qhs_aoss, SLAVE_AOSS, 1, 4, 0); +DEFINE_QNODE(qhs_camera_cfg, SLAVE_CAMERA_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_clk_ctl, SLAVE_CLK_CTL, 1, 4, 0); +DEFINE_QNODE(qhs_compute_dsp_cfg, SLAVE_CDSP_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_cpr_cx, SLAVE_RBCPR_CX_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_crypto0_cfg, SLAVE_CRYPTO_0_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_dcc_cfg, SLAVE_DCC_CFG, 1, 4, 1, MASTER_CNOC_DC_NOC); +DEFINE_QNODE(qhs_ddrss_cfg, SLAVE_CNOC_DDRSS, 1, 4, 0); +DEFINE_QNODE(qhs_display_cfg, SLAVE_DISPLAY_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_glm, SLAVE_GLM, 1, 4, 0); +DEFINE_QNODE(qhs_gpuss_cfg, SLAVE_GFX3D_CFG, 1, 8, 0); +DEFINE_QNODE(qhs_imem_cfg, SLAVE_IMEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_ipa, SLAVE_IPA_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_mnoc_cfg, SLAVE_CNOC_MNOC_CFG, 1, 4, 1, MASTER_CNOC_MNOC_CFG); +DEFINE_QNODE(qhs_pcie0_cfg, SLAVE_PCIE_0_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pcie_gen3_cfg, SLAVE_PCIE_1_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pdm, SLAVE_PDM, 1, 4, 0); +DEFINE_QNODE(qhs_phy_refgen_south, SLAVE_SOUTH_PHY_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_pimem_cfg, SLAVE_PIMEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_prng, SLAVE_PRNG, 1, 4, 0); +DEFINE_QNODE(qhs_qdss_cfg, SLAVE_QDSS_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_qupv3_north, SLAVE_BLSP_2, 1, 4, 0); +DEFINE_QNODE(qhs_qupv3_south, SLAVE_BLSP_1, 1, 4, 0); +DEFINE_QNODE(qhs_sdc2, SLAVE_SDCC_2, 1, 4, 0); +DEFINE_QNODE(qhs_sdc4, SLAVE_SDCC_4, 1, 4, 0); +DEFINE_QNODE(qhs_snoc_cfg, SLAVE_SNOC_CFG, 1, 4, 1, MASTER_SNOC_CFG); +DEFINE_QNODE(qhs_spdm, SLAVE_SPDM_WRAPPER, 1, 4, 0); +DEFINE_QNODE(qhs_spss_cfg, SLAVE_SPSS_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_tcsr, SLAVE_TCSR, 1, 4, 0); +DEFINE_QNODE(qhs_tlmm_north, SLAVE_TLMM_NORTH, 1, 4, 0); +DEFINE_QNODE(qhs_tlmm_south, SLAVE_TLMM_SOUTH, 1, 4, 0); +DEFINE_QNODE(qhs_tsif, SLAVE_TSIF, 1, 4, 0); +DEFINE_QNODE(qhs_ufs_card_cfg, SLAVE_UFS_CARD_CFG, 1, 4, 0); 
+DEFINE_QNODE(qhs_ufs_mem_cfg, SLAVE_UFS_MEM_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_usb3_0, SLAVE_USB3_0, 1, 4, 0); +DEFINE_QNODE(qhs_usb3_1, SLAVE_USB3_1, 1, 4, 0); +DEFINE_QNODE(qhs_venus_cfg, SLAVE_VENUS_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_vsense_ctrl_cfg, SLAVE_VSENSE_CTRL_CFG, 1, 4, 0); +DEFINE_QNODE(qns_cnoc_a2noc, SLAVE_CNOC_A2NOC, 1, 8, 1, MASTER_CNOC_A2NOC); +DEFINE_QNODE(srvc_cnoc, SLAVE_SERVICE_CNOC, 1, 4, 0); +DEFINE_QNODE(qhs_llcc, SLAVE_LLCC_CFG, 1, 4, 0); +DEFINE_QNODE(qhs_memnoc, SLAVE_MEM_NOC_CFG, 1, 4, 1, MASTER_MEM_NOC_CFG); +DEFINE_QNODE(qns_gladiator_sodv, SLAVE_GNOC_SNOC, 1, 8, 1, MASTER_GNOC_SNOC); +DEFINE_QNODE(qns_gnoc_memnoc, SLAVE_GNOC_MEM_NOC, 2, 32, 1, MASTER_GNOC_MEM_NOC); +DEFINE_QNODE(srvc_gnoc, SLAVE_SERVICE_GNOC, 1, 4, 0); +DEFINE_QNODE(ebi, SLAVE_EBI1, 4, 4, 0); +DEFINE_QNODE(qhs_mdsp_ms_mpu_cfg, SLAVE_MSS_PROC_MS_MPU_CFG, 1, 4, 0); +DEFINE_QNODE(qns_apps_io, SLAVE_MEM_NOC_GNOC, 1, 32, 0); +DEFINE_QNODE(qns_llcc, SLAVE_LLCC, 4, 16, 1, MASTER_LLCC); +DEFINE_QNODE(qns_memnoc_snoc, SLAVE_MEM_NOC_SNOC, 1, 8, 1, MASTER_MEM_NOC_SNOC); +DEFINE_QNODE(srvc_memnoc, SLAVE_SERVICE_MEM_NOC, 1, 4, 0); +DEFINE_QNODE(qns2_mem_noc, SLAVE_MNOC_SF_MEM_NOC, 1, 32, 1, MASTER_MNOC_SF_MEM_NOC); +DEFINE_QNODE(qns_mem_noc_hf, SLAVE_MNOC_HF_MEM_NOC, 2, 32, 1, MASTER_MNOC_HF_MEM_NOC); +DEFINE_QNODE(srvc_mnoc, SLAVE_SERVICE_MNOC, 1, 4, 0); +DEFINE_QNODE(qhs_apss, SLAVE_APPSS, 1, 8, 0); +DEFINE_QNODE(qns_cnoc, SLAVE_SNOC_CNOC, 1, 8, 1, MASTER_SNOC_CNOC); +DEFINE_QNODE(qns_memnoc_gc, SLAVE_SNOC_MEM_NOC_GC, 1, 8, 1, MASTER_SNOC_GC_MEM_NOC); +DEFINE_QNODE(qns_memnoc_sf, SLAVE_SNOC_MEM_NOC_SF, 1, 16, 1, MASTER_SNOC_SF_MEM_NOC); +DEFINE_QNODE(qxs_imem, SLAVE_IMEM, 1, 8, 0); +DEFINE_QNODE(qxs_pcie, SLAVE_PCIE_0, 1, 8, 0); +DEFINE_QNODE(qxs_pcie_gen3, SLAVE_PCIE_1, 1, 8, 0); +DEFINE_QNODE(qxs_pimem, SLAVE_PIMEM, 1, 8, 0); +DEFINE_QNODE(srvc_snoc, SLAVE_SERVICE_SNOC, 1, 4, 0); +DEFINE_QNODE(xs_qdss_stm, SLAVE_QDSS_STM, 1, 4, 0); +DEFINE_QNODE(xs_sys_tcu_cfg, SLAVE_TCU, 1, 8, 0); + +#define DEFINE_QBCM(_name, _bcmname, _keepalive, _numnodes, ...) 
\ + static struct qcom_icc_bcm _name = { \ + .name = _bcmname, \ + .keepalive = _keepalive, \ + .num_nodes = _numnodes, \ + .nodes = { __VA_ARGS__ }, \ + } + +DEFINE_QBCM(bcm_acv, "ACV", false, 1, &ebi); +DEFINE_QBCM(bcm_mc0, "MC0", true, 1, &ebi); +DEFINE_QBCM(bcm_sh0, "SH0", true, 1, &qns_llcc); +DEFINE_QBCM(bcm_mm0, "MM0", false, 1, &qns_mem_noc_hf); +DEFINE_QBCM(bcm_sh1, "SH1", false, 1, &qns_apps_io); +DEFINE_QBCM(bcm_mm1, "MM1", false, 7, &qxm_camnoc_hf0_uncomp, &qxm_camnoc_hf1_uncomp, &qxm_camnoc_sf_uncomp, &qxm_camnoc_hf0, &qxm_camnoc_hf1, &qxm_mdp0, &qxm_mdp1); +DEFINE_QBCM(bcm_sh2, "SH2", false, 1, &qns_memnoc_snoc); +DEFINE_QBCM(bcm_mm2, "MM2", false, 1, &qns2_mem_noc); +DEFINE_QBCM(bcm_sh3, "SH3", false, 1, &acm_tcu); +DEFINE_QBCM(bcm_mm3, "MM3", false, 5, &qxm_camnoc_sf, &qxm_rot, &qxm_venus0, &qxm_venus1, &qxm_venus_arm9); +DEFINE_QBCM(bcm_sh5, "SH5", false, 1, &qnm_apps); +DEFINE_QBCM(bcm_sn0, "SN0", true, 1, &qns_memnoc_sf); +DEFINE_QBCM(bcm_ce0, "CE0", false, 1, &qxm_crypto); +DEFINE_QBCM(bcm_cn0, "CN0", false, 47, &qhm_spdm, &qhm_tic, &qnm_snoc, &xm_qdss_dap, &qhs_a1_noc_cfg, &qhs_a2_noc_cfg, &qhs_aop, &qhs_aoss, &qhs_camera_cfg, &qhs_clk_ctl, &qhs_compute_dsp_cfg, &qhs_cpr_cx, &qhs_crypto0_cfg, &qhs_dcc_cfg, &qhs_ddrss_cfg, &qhs_display_cfg, &qhs_glm, &qhs_gpuss_cfg, &qhs_imem_cfg, &qhs_ipa, &qhs_mnoc_cfg, &qhs_pcie0_cfg, &qhs_pcie_gen3_cfg, &qhs_pdm, &qhs_phy_refgen_south, &qhs_pimem_cfg, &qhs_prng, &qhs_qdss_cfg, &qhs_qupv3_north, &qhs_qupv3_south, &qhs_sdc2, &qhs_sdc4, &qhs_snoc_cfg, &qhs_spdm, &qhs_spss_cfg, &qhs_tcsr, &qhs_tlmm_north, &qhs_tlmm_south, &qhs_tsif, &qhs_ufs_card_cfg, &qhs_ufs_mem_cfg, &qhs_usb3_0, &qhs_usb3_1, &qhs_venus_cfg, &qhs_vsense_ctrl_cfg, &qns_cnoc_a2noc, &srvc_cnoc); +DEFINE_QBCM(bcm_qup0, "QUP0", false, 2, &qhm_qup1, &qhm_qup2); +DEFINE_QBCM(bcm_sn1, "SN1", false, 1, &qxs_imem); +DEFINE_QBCM(bcm_sn2, "SN2", false, 1, &qns_memnoc_gc); +DEFINE_QBCM(bcm_sn3, "SN3", false, 1, &qns_cnoc); +DEFINE_QBCM(bcm_sn4, "SN4", false, 1, &qxm_pimem); +DEFINE_QBCM(bcm_sn5, "SN5", false, 1, &xs_qdss_stm); +DEFINE_QBCM(bcm_sn6, "SN6", false, 3, &qhs_apss, &srvc_snoc, &xs_sys_tcu_cfg); +DEFINE_QBCM(bcm_sn7, "SN7", false, 1, &qxs_pcie); +DEFINE_QBCM(bcm_sn8, "SN8", false, 1, &qxs_pcie_gen3); +DEFINE_QBCM(bcm_sn9, "SN9", false, 2, &srvc_aggre1_noc, &qnm_aggre1_noc); +DEFINE_QBCM(bcm_sn11, "SN11", false, 2, &srvc_aggre2_noc, &qnm_aggre2_noc); +DEFINE_QBCM(bcm_sn12, "SN12", false, 2, &qnm_gladiator_sodv, &xm_gic); +DEFINE_QBCM(bcm_sn14, "SN14", false, 1, &qnm_pcie_anoc); +DEFINE_QBCM(bcm_sn15, "SN15", false, 1, &qnm_memnoc); + +static struct qcom_icc_node *rsc_hlos_nodes[] = { + [MASTER_APPSS_PROC] = &acm_l3, + [MASTER_TCU_0] = &acm_tcu, + [MASTER_LLCC] = &llcc_mc, + [MASTER_GNOC_CFG] = &pm_gnoc_cfg, + [MASTER_A1NOC_CFG] = &qhm_a1noc_cfg, + [MASTER_A2NOC_CFG] = &qhm_a2noc_cfg, + [MASTER_CNOC_DC_NOC] = &qhm_cnoc, + [MASTER_MEM_NOC_CFG] = &qhm_memnoc_cfg, + [MASTER_CNOC_MNOC_CFG] = &qhm_mnoc_cfg, + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_BLSP_1] = &qhm_qup1, + [MASTER_BLSP_2] = &qhm_qup2, + [MASTER_SNOC_CFG] = &qhm_snoc_cfg, + [MASTER_SPDM] = &qhm_spdm, + [MASTER_TIC] = &qhm_tic, + [MASTER_TSIF] = &qhm_tsif, + [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [MASTER_GNOC_MEM_NOC] = &qnm_apps, + [MASTER_CNOC_A2NOC] = &qnm_cnoc, + [MASTER_GNOC_SNOC] = &qnm_gladiator_sodv, + [MASTER_MEM_NOC_SNOC] = &qnm_memnoc, + [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_ANOC_PCIE_SNOC] = 
&qnm_pcie_anoc, + [MASTER_SNOC_CNOC] = &qnm_snoc, + [MASTER_SNOC_GC_MEM_NOC] = &qnm_snoc_gc, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [MASTER_CAMNOC_HF0] = &qxm_camnoc_hf0, + [MASTER_CAMNOC_HF0_UNCOMP] = &qxm_camnoc_hf0_uncomp, + [MASTER_CAMNOC_HF1] = &qxm_camnoc_hf1, + [MASTER_CAMNOC_HF1_UNCOMP] = &qxm_camnoc_hf1_uncomp, + [MASTER_CAMNOC_SF] = &qxm_camnoc_sf, + [MASTER_CAMNOC_SF_UNCOMP] = &qxm_camnoc_sf_uncomp, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_GFX3D] = &qxm_gpu, + [MASTER_IPA] = &qxm_ipa, + [MASTER_MDP0] = &qxm_mdp0, + [MASTER_MDP1] = &qxm_mdp1, + [MASTER_PIMEM] = &qxm_pimem, + [MASTER_ROTATOR] = &qxm_rot, + [MASTER_VIDEO_P0] = &qxm_venus0, + [MASTER_VIDEO_P1] = &qxm_venus1, + [MASTER_VIDEO_PROC] = &qxm_venus_arm9, + [MASTER_GIC] = &xm_gic, + [MASTER_PCIE_1] = &xm_pcie3_1, + [MASTER_PCIE_0] = &xm_pcie_0, + [MASTER_QDSS_DAP] = &xm_qdss_dap, + [MASTER_QDSS_ETR] = &xm_qdss_etr, + [MASTER_SDCC_2] = &xm_sdc2, + [MASTER_SDCC_4] = &xm_sdc4, + [MASTER_UFS_CARD] = &xm_ufs_card, + [MASTER_UFS_MEM] = &xm_ufs_mem, + [MASTER_USB3_0] = &xm_usb3_0, + [MASTER_USB3_1] = &xm_usb3_1, + [SLAVE_EBI1] = &ebi, + [SLAVE_A1NOC_CFG] = &qhs_a1_noc_cfg, + [SLAVE_A2NOC_CFG] = &qhs_a2_noc_cfg, + [SLAVE_AOP] = &qhs_aop, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_APPSS] = &qhs_apss, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CDSP_CFG] = &qhs_compute_dsp_cfg, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_DCC_CFG] = &qhs_dcc_cfg, + [SLAVE_CNOC_DDRSS] = &qhs_ddrss_cfg, + [SLAVE_DISPLAY_CFG] = &qhs_display_cfg, + [SLAVE_GLM] = &qhs_glm, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPA_CFG] = &qhs_ipa, + [SLAVE_LLCC_CFG] = &qhs_llcc, + [SLAVE_MSS_PROC_MS_MPU_CFG] = &qhs_mdsp_ms_mpu_cfg, + [SLAVE_MEM_NOC_CFG] = &qhs_memnoc, + [SLAVE_CNOC_MNOC_CFG] = &qhs_mnoc_cfg, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie_gen3_cfg, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_SOUTH_PHY_CFG] = &qhs_phy_refgen_south, + [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_BLSP_2] = &qhs_qupv3_north, + [SLAVE_BLSP_1] = &qhs_qupv3_south, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SDCC_4] = &qhs_sdc4, + [SLAVE_SNOC_CFG] = &qhs_snoc_cfg, + [SLAVE_SPDM_WRAPPER] = &qhs_spdm, + [SLAVE_SPSS_CFG] = &qhs_spss_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM_NORTH] = &qhs_tlmm_north, + [SLAVE_TLMM_SOUTH] = &qhs_tlmm_south, + [SLAVE_TSIF] = &qhs_tsif, + [SLAVE_UFS_CARD_CFG] = &qhs_ufs_card_cfg, + [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg, + [SLAVE_USB3_0] = &qhs_usb3_0, + [SLAVE_USB3_1] = &qhs_usb3_1, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_MNOC_SF_MEM_NOC] = &qns2_mem_noc, + [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, + [SLAVE_MEM_NOC_GNOC] = &qns_apps_io, + [SLAVE_CAMNOC_UNCOMP] = &qns_camnoc_uncomp, + [SLAVE_SNOC_CNOC] = &qns_cnoc, + [SLAVE_CNOC_A2NOC] = &qns_cnoc_a2noc, + [SLAVE_GNOC_SNOC] = &qns_gladiator_sodv, + [SLAVE_GNOC_MEM_NOC] = &qns_gnoc_memnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_SNOC_MEM_NOC_GC] = &qns_memnoc_gc, + [SLAVE_SNOC_MEM_NOC_SF] = &qns_memnoc_sf, + [SLAVE_MEM_NOC_SNOC] = &qns_memnoc_snoc, + [SLAVE_ANOC_PCIE_A1NOC_SNOC] = &qns_pcie_a1noc_snoc, + [SLAVE_ANOC_PCIE_SNOC] = &qns_pcie_snoc, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_PCIE_0] = &qxs_pcie, + [SLAVE_PCIE_1] = &qxs_pcie_gen3, + [SLAVE_PIMEM] = &qxs_pimem, 
+	[SLAVE_SERVICE_A1NOC] = &srvc_aggre1_noc,
+	[SLAVE_SERVICE_A2NOC] = &srvc_aggre2_noc,
+	[SLAVE_SERVICE_CNOC] = &srvc_cnoc,
+	[SLAVE_SERVICE_GNOC] = &srvc_gnoc,
+	[SLAVE_SERVICE_MEM_NOC] = &srvc_memnoc,
+	[SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+	[SLAVE_SERVICE_SNOC] = &srvc_snoc,
+	[SLAVE_QDSS_STM] = &xs_qdss_stm,
+	[SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static struct qcom_icc_bcm *rsc_hlos_bcms[] = {
+	&bcm_acv,
+	&bcm_mc0,
+	&bcm_sh0,
+	&bcm_mm0,
+	&bcm_sh1,
+	&bcm_mm1,
+	&bcm_sh2,
+	&bcm_mm2,
+	&bcm_sh3,
+	&bcm_mm3,
+	&bcm_sh5,
+	&bcm_sn0,
+	&bcm_ce0,
+	&bcm_cn0,
+	&bcm_qup0,
+	&bcm_sn1,
+	&bcm_sn2,
+	&bcm_sn3,
+	&bcm_sn4,
+	&bcm_sn5,
+	&bcm_sn6,
+	&bcm_sn7,
+	&bcm_sn8,
+	&bcm_sn9,
+	&bcm_sn11,
+	&bcm_sn12,
+	&bcm_sn14,
+	&bcm_sn15,
+};
+
+static struct qcom_icc_desc sdm845_rsc_hlos = {
+	.nodes = rsc_hlos_nodes,
+	.num_nodes = ARRAY_SIZE(rsc_hlos_nodes),
+	.bcms = rsc_hlos_bcms,
+	.num_bcms = ARRAY_SIZE(rsc_hlos_bcms),
+};
+
+static int qcom_icc_bcm_init(struct qcom_icc_bcm *bcm, struct device *dev)
+{
+	struct qcom_icc_node *qn;
+	const struct bcm_db *data;
+	size_t data_count;
+	int i;
+
+	bcm->addr = cmd_db_read_addr(bcm->name);
+	if (!bcm->addr) {
+		dev_err(dev, "%s could not find RPMh address\n",
+			bcm->name);
+		return -EINVAL;
+	}
+
+	data = cmd_db_read_aux_data(bcm->name, &data_count);
+	if (IS_ERR(data)) {
+		dev_err(dev, "%s command db read error (%ld)\n",
+			bcm->name, PTR_ERR(data));
+		return PTR_ERR(data);
+	}
+	if (!data_count) {
+		dev_err(dev, "%s command db missing or partial aux data\n",
+			bcm->name);
+		return -EINVAL;
+	}
+
+	bcm->aux_data.unit = le32_to_cpu(data->unit);
+	bcm->aux_data.width = le16_to_cpu(data->width);
+	bcm->aux_data.vcd = data->vcd;
+	bcm->aux_data.reserved = data->reserved;
+
+	/*
+	 * Link Qnodes to their respective BCMs
+	 */
+	for (i = 0; i < bcm->num_nodes; i++) {
+		qn = bcm->nodes[i];
+		qn->bcms[qn->num_bcms] = bcm;
+		qn->num_bcms++;
+	}
+
+	return 0;
+}
+
+static void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y,
+			u32 addr, bool commit)
+{
+	bool valid = true;
+
+	if (!cmd)
+		return;
+
+	if (vote_x == 0 && vote_y == 0)
+		valid = false;
+
+	if (vote_x > BCM_TCS_CMD_VOTE_MASK)
+		vote_x = BCM_TCS_CMD_VOTE_MASK;
+
+	if (vote_y > BCM_TCS_CMD_VOTE_MASK)
+		vote_y = BCM_TCS_CMD_VOTE_MASK;
+
+	cmd->addr = addr;
+	cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y);
+
+	/*
+	 * Set the wait-for-completion flag on commands that need to be
+	 * completed before the next command.
+	 */
+	if (commit)
+		cmd->wait = true;
+}
+
+static void tcs_list_gen(struct list_head *bcm_list,
+			 struct tcs_cmd tcs_list[SDM845_MAX_VCD],
+			 int n[SDM845_MAX_VCD])
+{
+	struct qcom_icc_bcm *bcm;
+	bool commit;
+	size_t idx = 0, batch = 0, cur_vcd_size = 0;
+
+	memset(n, 0, sizeof(int) * SDM845_MAX_VCD);
+
+	list_for_each_entry(bcm, bcm_list, list) {
+		commit = false;
+		cur_vcd_size++;
+		if ((list_is_last(&bcm->list, bcm_list)) ||
+		    bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) {
+			commit = true;
+			cur_vcd_size = 0;
+		}
+		tcs_cmd_gen(&tcs_list[idx], bcm->vote_x, bcm->vote_y,
+			    bcm->addr, commit);
+		idx++;
+		n[batch]++;
+		/*
+		 * Batch the BCMs in such a way that we do not split them in
+		 * multiple payloads when they are under the same VCD. This is
+		 * to ensure that every BCM is committed, since we only set
+		 * the commit bit on the last BCM request of every VCD.
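+		 * As a worked example (assuming MAX_RPMH_PAYLOAD is 16, as in
+		 * the RPMh driver): if the payload fills up in the middle of
+		 * a VCD run, the whole in-progress run is carried over into
+		 * the next batch rather than being split across two payloads.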
+ */ + if (n[batch] >= MAX_RPMH_PAYLOAD) { + if (!commit) { + n[batch] -= cur_vcd_size; + n[batch + 1] = cur_vcd_size; + } + batch++; + } + } +} + +static void bcm_aggregate(struct qcom_icc_bcm *bcm) +{ + size_t i; + u64 agg_avg = 0; + u64 agg_peak = 0; + u64 temp; + + for (i = 0; i < bcm->num_nodes; i++) { + temp = bcm->nodes[i]->sum_avg * bcm->aux_data.width; + do_div(temp, bcm->nodes[i]->buswidth * bcm->nodes[i]->channels); + agg_avg = max(agg_avg, temp); + + temp = bcm->nodes[i]->max_peak * bcm->aux_data.width; + do_div(temp, bcm->nodes[i]->buswidth); + agg_peak = max(agg_peak, temp); + } + + temp = agg_avg * 1000ULL; + do_div(temp, bcm->aux_data.unit); + bcm->vote_x = temp; + + temp = agg_peak * 1000ULL; + do_div(temp, bcm->aux_data.unit); + bcm->vote_y = temp; + + if (bcm->keepalive && bcm->vote_x == 0 && bcm->vote_y == 0) { + bcm->vote_x = 1; + bcm->vote_y = 1; + } + + bcm->dirty = false; +} + +static int qcom_icc_aggregate(struct icc_node *node, u32 avg_bw, + u32 peak_bw, u32 *agg_avg, u32 *agg_peak) +{ + size_t i; + struct qcom_icc_node *qn; + + qn = node->data; + + *agg_avg += avg_bw; + *agg_peak = max_t(u32, *agg_peak, peak_bw); + + qn->sum_avg = *agg_avg; + qn->max_peak = *agg_peak; + + for (i = 0; i < qn->num_bcms; i++) + qn->bcms[i]->dirty = true; + + return 0; +} + +static int qcom_icc_set(struct icc_node *src, struct icc_node *dst) +{ + struct qcom_icc_provider *qp; + struct icc_node *node; + struct tcs_cmd cmds[SDM845_MAX_BCMS]; + struct list_head commit_list; + int commit_idx[SDM845_MAX_VCD]; + int ret = 0, i; + + if (!src) + node = dst; + else + node = src; + + qp = to_qcom_provider(node->provider); + + INIT_LIST_HEAD(&commit_list); + + for (i = 0; i < qp->num_bcms; i++) { + if (qp->bcms[i]->dirty) { + bcm_aggregate(qp->bcms[i]); + list_add_tail(&qp->bcms[i]->list, &commit_list); + } + } + + /* + * Construct the command list based on a pre ordered list of BCMs + * based on VCD. 
+ */ + tcs_list_gen(&commit_list, cmds, commit_idx); + + if (!commit_idx[0]) + return ret; + + ret = rpmh_invalidate(qp->dev); + if (ret) { + pr_err("Error invalidating RPMH client (%d)\n", ret); + return ret; + } + + ret = rpmh_write_batch(qp->dev, RPMH_ACTIVE_ONLY_STATE, + cmds, commit_idx); + if (ret) { + pr_err("Error sending AMC RPMH requests (%d)\n", ret); + return ret; + } + + return ret; +} + +static int cmp_vcd(const void *_l, const void *_r) +{ + const struct qcom_icc_bcm **l = (const struct qcom_icc_bcm **)_l; + const struct qcom_icc_bcm **r = (const struct qcom_icc_bcm **)_r; + + if (l[0]->aux_data.vcd < r[0]->aux_data.vcd) + return -1; + else if (l[0]->aux_data.vcd == r[0]->aux_data.vcd) + return 0; + else + return 1; +} + +static int qnoc_probe(struct platform_device *pdev) +{ + const struct qcom_icc_desc *desc; + struct icc_onecell_data *data; + struct icc_provider *provider; + struct qcom_icc_node **qnodes; + struct qcom_icc_provider *qp; + struct icc_node *node; + size_t num_nodes, i; + int ret; + + desc = of_device_get_match_data(&pdev->dev); + if (!desc) + return -EINVAL; + + qnodes = desc->nodes; + num_nodes = desc->num_nodes; + + qp = devm_kzalloc(&pdev->dev, sizeof(*qp), GFP_KERNEL); + if (!qp) + return -ENOMEM; + + data = devm_kcalloc(&pdev->dev, num_nodes, sizeof(*node), GFP_KERNEL); + if (!data) + return -ENOMEM; + + provider = &qp->provider; + provider->dev = &pdev->dev; + provider->set = qcom_icc_set; + provider->aggregate = qcom_icc_aggregate; + provider->xlate = of_icc_xlate_onecell; + INIT_LIST_HEAD(&provider->nodes); + provider->data = data; + + qp->dev = &pdev->dev; + qp->bcms = desc->bcms; + qp->num_bcms = desc->num_bcms; + + ret = icc_provider_add(provider); + if (ret) { + dev_err(&pdev->dev, "error adding interconnect provider\n"); + return ret; + } + + for (i = 0; i < num_nodes; i++) { + size_t j; + + node = icc_node_create(qnodes[i]->id); + if (IS_ERR(node)) { + ret = PTR_ERR(node); + goto err; + } + + node->name = qnodes[i]->name; + node->data = qnodes[i]; + icc_node_add(node, provider); + + dev_dbg(&pdev->dev, "registered node %p %s %d\n", node, + qnodes[i]->name, node->id); + + /* populate links */ + for (j = 0; j < qnodes[i]->num_links; j++) + icc_link_create(node, qnodes[i]->links[j]); + + data->nodes[i] = node; + } + data->num_nodes = num_nodes; + + for (i = 0; i < qp->num_bcms; i++) + qcom_icc_bcm_init(qp->bcms[i], &pdev->dev); + + /* + * Pre sort the BCMs based on VCD for ease of generating a command list + * that groups the BCMs with the same VCD together. VCDs are numbered + * with lowest being the most expensive time wise, ensuring that + * those commands are being sent the earliest in the queue. 
+ */ + sort(qp->bcms, qp->num_bcms, sizeof(*qp->bcms), cmp_vcd, NULL); + + platform_set_drvdata(pdev, qp); + + dev_dbg(&pdev->dev, "Registered SDM845 ICC\n"); + + return ret; +err: + list_for_each_entry(node, &provider->nodes, node_list) { + icc_node_del(node); + icc_node_destroy(node->id); + } + + icc_provider_del(provider); + return ret; +} + +static int qnoc_remove(struct platform_device *pdev) +{ + struct qcom_icc_provider *qp = platform_get_drvdata(pdev); + struct icc_provider *provider = &qp->provider; + struct icc_node *n; + + list_for_each_entry(n, &provider->nodes, node_list) { + icc_node_del(n); + icc_node_destroy(n->id); + } + + return icc_provider_del(provider); +} + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sdm845-rsc-hlos", .data = &sdm845_rsc_hlos }, + { }, +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove = qnoc_remove, + .driver = { + .name = "qnoc-sdm845", + .of_match_table = qnoc_of_match, + }, +}; +module_platform_driver(qnoc_driver); + +MODULE_AUTHOR("David Dai "); +MODULE_DESCRIPTION("Qualcomm sdm845 NoC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/include/dt-bindings/interconnect/qcom,sdm845.h b/include/dt-bindings/interconnect/qcom,sdm845.h new file mode 100644 index 000000000000..7b2393be7361 --- /dev/null +++ b/include/dt-bindings/interconnect/qcom,sdm845.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Qualcomm SDM845 interconnect IDs + * + * Copyright (c) 2018, Linaro Ltd. + * Author: Georgi Djakov + */ + +#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H +#define __DT_BINDINGS_INTERCONNECT_QCOM_SDM845_H + +#define MASTER_A1NOC_CFG 0 +#define MASTER_BLSP_1 1 +#define MASTER_TSIF 2 +#define MASTER_SDCC_2 3 +#define MASTER_SDCC_4 4 +#define MASTER_UFS_CARD 5 +#define MASTER_UFS_MEM 6 +#define MASTER_PCIE_0 7 +#define MASTER_A2NOC_CFG 8 +#define MASTER_QDSS_BAM 9 +#define MASTER_BLSP_2 10 +#define MASTER_CNOC_A2NOC 11 +#define MASTER_CRYPTO 12 +#define MASTER_IPA 13 +#define MASTER_PCIE_1 14 +#define MASTER_QDSS_ETR 15 +#define MASTER_USB3_0 16 +#define MASTER_USB3_1 17 +#define MASTER_CAMNOC_HF0_UNCOMP 18 +#define MASTER_CAMNOC_HF1_UNCOMP 19 +#define MASTER_CAMNOC_SF_UNCOMP 20 +#define MASTER_SPDM 21 +#define MASTER_TIC 22 +#define MASTER_SNOC_CNOC 23 +#define MASTER_QDSS_DAP 24 +#define MASTER_CNOC_DC_NOC 25 +#define MASTER_APPSS_PROC 26 +#define MASTER_GNOC_CFG 27 +#define MASTER_LLCC 28 +#define MASTER_TCU_0 29 +#define MASTER_MEM_NOC_CFG 30 +#define MASTER_GNOC_MEM_NOC 31 +#define MASTER_MNOC_HF_MEM_NOC 32 +#define MASTER_MNOC_SF_MEM_NOC 33 +#define MASTER_SNOC_GC_MEM_NOC 34 +#define MASTER_SNOC_SF_MEM_NOC 35 +#define MASTER_GFX3D 36 +#define MASTER_CNOC_MNOC_CFG 37 +#define MASTER_CAMNOC_HF0 38 +#define MASTER_CAMNOC_HF1 39 +#define MASTER_CAMNOC_SF 40 +#define MASTER_MDP0 41 +#define MASTER_MDP1 42 +#define MASTER_ROTATOR 43 +#define MASTER_VIDEO_P0 44 +#define MASTER_VIDEO_P1 45 +#define MASTER_VIDEO_PROC 46 +#define MASTER_SNOC_CFG 47 +#define MASTER_A1NOC_SNOC 48 +#define MASTER_A2NOC_SNOC 49 +#define MASTER_GNOC_SNOC 50 +#define MASTER_MEM_NOC_SNOC 51 +#define MASTER_ANOC_PCIE_SNOC 52 +#define MASTER_PIMEM 53 +#define MASTER_GIC 54 +#define SLAVE_A1NOC_SNOC 55 +#define SLAVE_SERVICE_A1NOC 56 +#define SLAVE_ANOC_PCIE_A1NOC_SNOC 57 +#define SLAVE_A2NOC_SNOC 58 +#define SLAVE_ANOC_PCIE_SNOC 59 +#define SLAVE_SERVICE_A2NOC 60 +#define SLAVE_CAMNOC_UNCOMP 61 +#define SLAVE_A1NOC_CFG 62 +#define SLAVE_A2NOC_CFG 63 +#define 
SLAVE_AOP 64 +#define SLAVE_AOSS 65 +#define SLAVE_CAMERA_CFG 66 +#define SLAVE_CLK_CTL 67 +#define SLAVE_CDSP_CFG 68 +#define SLAVE_RBCPR_CX_CFG 69 +#define SLAVE_CRYPTO_0_CFG 70 +#define SLAVE_DCC_CFG 71 +#define SLAVE_CNOC_DDRSS 72 +#define SLAVE_DISPLAY_CFG 73 +#define SLAVE_GLM 74 +#define SLAVE_GFX3D_CFG 75 +#define SLAVE_IMEM_CFG 76 +#define SLAVE_IPA_CFG 77 +#define SLAVE_CNOC_MNOC_CFG 78 +#define SLAVE_PCIE_0_CFG 79 +#define SLAVE_PCIE_1_CFG 80 +#define SLAVE_PDM 81 +#define SLAVE_SOUTH_PHY_CFG 82 +#define SLAVE_PIMEM_CFG 83 +#define SLAVE_PRNG 84 +#define SLAVE_QDSS_CFG 85 +#define SLAVE_BLSP_2 86 +#define SLAVE_BLSP_1 87 +#define SLAVE_SDCC_2 88 +#define SLAVE_SDCC_4 89 +#define SLAVE_SNOC_CFG 90 +#define SLAVE_SPDM_WRAPPER 91 +#define SLAVE_SPSS_CFG 92 +#define SLAVE_TCSR 93 +#define SLAVE_TLMM_NORTH 94 +#define SLAVE_TLMM_SOUTH 95 +#define SLAVE_TSIF 96 +#define SLAVE_UFS_CARD_CFG 97 +#define SLAVE_UFS_MEM_CFG 98 +#define SLAVE_USB3_0 99 +#define SLAVE_USB3_1 100 +#define SLAVE_VENUS_CFG 101 +#define SLAVE_VSENSE_CTRL_CFG 102 +#define SLAVE_CNOC_A2NOC 103 +#define SLAVE_SERVICE_CNOC 104 +#define SLAVE_LLCC_CFG 105 +#define SLAVE_MEM_NOC_CFG 106 +#define SLAVE_GNOC_SNOC 107 +#define SLAVE_GNOC_MEM_NOC 108 +#define SLAVE_SERVICE_GNOC 109 +#define SLAVE_EBI1 110 +#define SLAVE_MSS_PROC_MS_MPU_CFG 111 +#define SLAVE_MEM_NOC_GNOC 112 +#define SLAVE_LLCC 113 +#define SLAVE_MEM_NOC_SNOC 114 +#define SLAVE_SERVICE_MEM_NOC 115 +#define SLAVE_MNOC_SF_MEM_NOC 116 +#define SLAVE_MNOC_HF_MEM_NOC 117 +#define SLAVE_SERVICE_MNOC 118 +#define SLAVE_APPSS 119 +#define SLAVE_SNOC_CNOC 120 +#define SLAVE_SNOC_MEM_NOC_GC 121 +#define SLAVE_SNOC_MEM_NOC_SF 122 +#define SLAVE_IMEM 123 +#define SLAVE_PCIE_0 124 +#define SLAVE_PCIE_1 125 +#define SLAVE_PIMEM 126 +#define SLAVE_SERVICE_SNOC 127 +#define SLAVE_QDSS_STM 128 +#define SLAVE_TCU 129 + +#endif -- cgit v1.2.3-71-gd317 From 79bf0cbd86ac4887a7ac897fec8f011a763e23ba Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 16 Jan 2019 11:25:20 -0700 Subject: iomap: introduce io{read|write}64_{lo_hi|hi_lo} In order to provide non-atomic functions for io{read|write}64 that will use readq and writeq when appropriate. We define a number of variants of these functions in the generic iomap that will do non-atomic operations on pio but atomic operations on mmio. These functions are only defined if readq and writeq are defined. If they are not, then the wrappers that always use non-atomic operations from include/linux/io-64-nonatomic*.h will be used. 
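As a usage sketch (the base pointer and register offset below are
hypothetical), a driver accessing a 64-bit register made of two 32-bit
halves that hardware expects to be accessed low word first would use the
_lo_hi variants:

	u64 val;

	val = ioread64_lo_hi(base + REG_CTR);	/* low u32 first, then high */
	iowrite64_lo_hi(val, base + REG_CTR);	/* low u32 first, then high */

With these definitions, the MMIO case is a single atomic readq/writeq on
architectures that provide one; only the port I/O path is split into two
32-bit accesses.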
Signed-off-by: Logan Gunthorpe Reviewed-by: Andy Shevchenko Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Cc: Michael Ellerman Cc: Arnd Bergmann Cc: Suresh Warrier Cc: Nicholas Piggin Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/include/asm/io.h | 2 + include/asm-generic/iomap.h | 22 +++++++ lib/iomap.c | 132 ++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 156 insertions(+) (limited to 'include') diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 7f19fbd3ba55..4b73847e9b95 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h @@ -783,8 +783,10 @@ extern void __iounmap_at(void *ea, unsigned long size); #define mmio_read16be(addr) readw_be(addr) #define mmio_read32be(addr) readl_be(addr) +#define mmio_read64be(addr) readq_be(addr) #define mmio_write16be(val, addr) writew_be(val, addr) #define mmio_write32be(val, addr) writel_be(val, addr) +#define mmio_write64be(val, addr) writeq_be(val, addr) #define mmio_insb(addr, dst, count) readsb(addr, dst, count) #define mmio_insw(addr, dst, count) readsw(addr, dst, count) #define mmio_insl(addr, dst, count) readsl(addr, dst, count) diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h index 5b63b94ef6b5..a008f504a2d0 100644 --- a/include/asm-generic/iomap.h +++ b/include/asm-generic/iomap.h @@ -36,6 +36,17 @@ extern u64 ioread64(void __iomem *); extern u64 ioread64be(void __iomem *); #endif +#ifdef readq +#define ioread64_lo_hi ioread64_lo_hi +#define ioread64_hi_lo ioread64_hi_lo +#define ioread64be_lo_hi ioread64be_lo_hi +#define ioread64be_hi_lo ioread64be_hi_lo +extern u64 ioread64_lo_hi(void __iomem *addr); +extern u64 ioread64_hi_lo(void __iomem *addr); +extern u64 ioread64be_lo_hi(void __iomem *addr); +extern u64 ioread64be_hi_lo(void __iomem *addr); +#endif + extern void iowrite8(u8, void __iomem *); extern void iowrite16(u16, void __iomem *); extern void iowrite16be(u16, void __iomem *); @@ -46,6 +57,17 @@ extern void iowrite64(u64, void __iomem *); extern void iowrite64be(u64, void __iomem *); #endif +#ifdef writeq +#define iowrite64_lo_hi iowrite64_lo_hi +#define iowrite64_hi_lo iowrite64_hi_lo +#define iowrite64be_lo_hi iowrite64be_lo_hi +#define iowrite64be_hi_lo iowrite64be_hi_lo +extern void iowrite64_lo_hi(u64 val, void __iomem *addr); +extern void iowrite64_hi_lo(u64 val, void __iomem *addr); +extern void iowrite64be_lo_hi(u64 val, void __iomem *addr); +extern void iowrite64be_hi_lo(u64 val, void __iomem *addr); +#endif + /* * "string" versions of the above. 
Note that they * use native byte ordering for the accesses (on diff --git a/lib/iomap.c b/lib/iomap.c index 2c293b22569f..e909ab71e995 100644 --- a/lib/iomap.c +++ b/lib/iomap.c @@ -67,6 +67,7 @@ static void bad_io_access(unsigned long port, const char *access) #ifndef mmio_read16be #define mmio_read16be(addr) swab16(readw(addr)) #define mmio_read32be(addr) swab32(readl(addr)) +#define mmio_read64be(addr) swab64(readq(addr)) #endif unsigned int ioread8(void __iomem *addr) @@ -100,6 +101,80 @@ EXPORT_SYMBOL(ioread16be); EXPORT_SYMBOL(ioread32); EXPORT_SYMBOL(ioread32be); +#ifdef readq +static u64 pio_read64_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = inl(port); + hi = inl(port + sizeof(u32)); + + return lo | (hi << 32); +} + +static u64 pio_read64_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = inl(port + sizeof(u32)); + lo = inl(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_lo_hi(unsigned long port) +{ + u64 lo, hi; + + lo = pio_read32be(port + sizeof(u32)); + hi = pio_read32be(port); + + return lo | (hi << 32); +} + +static u64 pio_read64be_hi_lo(unsigned long port) +{ + u64 lo, hi; + + hi = pio_read32be(port); + lo = pio_read32be(port + sizeof(u32)); + + return lo | (hi << 32); +} + +u64 ioread64_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_lo_hi(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64_hi_lo(port), return readq(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_lo_hi(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_lo_hi(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +u64 ioread64be_hi_lo(void __iomem *addr) +{ + IO_COND(addr, return pio_read64be_hi_lo(port), + return mmio_read64be(addr)); + return 0xffffffffffffffffULL; +} + +EXPORT_SYMBOL(ioread64_lo_hi); +EXPORT_SYMBOL(ioread64_hi_lo); +EXPORT_SYMBOL(ioread64be_lo_hi); +EXPORT_SYMBOL(ioread64be_hi_lo); + +#endif /* readq */ + #ifndef pio_write16be #define pio_write16be(val,port) outw(swab16(val),port) #define pio_write32be(val,port) outl(swab32(val),port) @@ -108,6 +183,7 @@ EXPORT_SYMBOL(ioread32be); #ifndef mmio_write16be #define mmio_write16be(val,port) writew(swab16(val),port) #define mmio_write32be(val,port) writel(swab32(val),port) +#define mmio_write64be(val,port) writeq(swab64(val),port) #endif void iowrite8(u8 val, void __iomem *addr) @@ -136,6 +212,62 @@ EXPORT_SYMBOL(iowrite16be); EXPORT_SYMBOL(iowrite32); EXPORT_SYMBOL(iowrite32be); +#ifdef writeq +static void pio_write64_lo_hi(u64 val, unsigned long port) +{ + outl(val, port); + outl(val >> 32, port + sizeof(u32)); +} + +static void pio_write64_hi_lo(u64 val, unsigned long port) +{ + outl(val >> 32, port + sizeof(u32)); + outl(val, port); +} + +static void pio_write64be_lo_hi(u64 val, unsigned long port) +{ + pio_write32be(val, port + sizeof(u32)); + pio_write32be(val >> 32, port); +} + +static void pio_write64be_hi_lo(u64 val, unsigned long port) +{ + pio_write32be(val >> 32, port); + pio_write32be(val, port + sizeof(u32)); +} + +void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_lo_hi(val, port), + writeq(val, addr)); +} + +void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64_hi_lo(val, port), + writeq(val, addr)); +} + +void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + IO_COND(addr, pio_write64be_lo_hi(val, port), + mmio_write64be(val, addr)); +} + +void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + 
IO_COND(addr, pio_write64be_hi_lo(val, port), + mmio_write64be(val, addr)); +} + +EXPORT_SYMBOL(iowrite64_lo_hi); +EXPORT_SYMBOL(iowrite64_hi_lo); +EXPORT_SYMBOL(iowrite64be_lo_hi); +EXPORT_SYMBOL(iowrite64be_hi_lo); + +#endif /* writeq */ + /* * These are the "repeat MMIO read/write" functions. * Note the "__raw" accesses, since we don't want to -- cgit v1.2.3-71-gd317 From c81d64d3dc1f2decf8f3a9354416b7496b5c389b Mon Sep 17 00:00:00 2001 From: Logan Gunthorpe Date: Wed, 16 Jan 2019 11:25:21 -0700 Subject: io-64-nonatomic: add io{read|write}64[be]{_lo_hi|_hi_lo} macros This patch adds generic io{read|write}64[be]{_lo_hi|_hi_lo} macros if they are not already defined by the architecture (as they are provided by the generic iomap library). The patch also points io{read|write}64[be] to the variant specified by the header name. This is because new drivers are encouraged to use ioreadXX, et al instead of readX[1], et al -- and mixing ioreadXX with readq is pretty ugly. [1] LDD3: section 9.4.2 Signed-off-by: Logan Gunthorpe Reviewed-by: Andy Shevchenko Cc: Christoph Hellwig Cc: Arnd Bergmann Cc: Alan Cox Cc: Greg Kroah-Hartman Signed-off-by: Greg Kroah-Hartman --- include/linux/io-64-nonatomic-hi-lo.h | 64 +++++++++++++++++++++++++++++++++++ include/linux/io-64-nonatomic-lo-hi.h | 64 +++++++++++++++++++++++++++++++++++ 2 files changed, 128 insertions(+) (limited to 'include') diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 862d786a904f..ae21b72cce85 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h @@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed hi_lo_writeq_relaxed #endif +#ifndef ioread64_hi_lo +#define ioread64_hi_lo ioread64_hi_lo +static inline u64 ioread64_hi_lo(void __iomem *addr) +{ + u32 low, high; + + high = ioread32(addr + sizeof(u32)); + low = ioread32(addr); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64_hi_lo +#define iowrite64_hi_lo iowrite64_hi_lo +static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) +{ + iowrite32(val >> 32, addr + sizeof(u32)); + iowrite32(val, addr); +} +#endif + +#ifndef ioread64be_hi_lo +#define ioread64be_hi_lo ioread64be_hi_lo +static inline u64 ioread64be_hi_lo(void __iomem *addr) +{ + u32 low, high; + + high = ioread32be(addr); + low = ioread32be(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64be_hi_lo +#define iowrite64be_hi_lo iowrite64be_hi_lo +static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) +{ + iowrite32be(val >> 32, addr); + iowrite32be(val, addr + sizeof(u32)); +} +#endif + +#ifndef ioread64 +#define ioread64_is_nonatomic +#define ioread64 ioread64_hi_lo +#endif + +#ifndef iowrite64 +#define iowrite64_is_nonatomic +#define iowrite64 iowrite64_hi_lo +#endif + +#ifndef ioread64be +#define ioread64be_is_nonatomic +#define ioread64be ioread64be_hi_lo +#endif + +#ifndef iowrite64be +#define iowrite64be_is_nonatomic +#define iowrite64be iowrite64be_hi_lo +#endif + #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index d042e7bb5adb..faaa842dbdb9 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h @@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) #define writeq_relaxed lo_hi_writeq_relaxed #endif +#ifndef ioread64_lo_hi
+#define ioread64_lo_hi ioread64_lo_hi +static inline u64 ioread64_lo_hi(void __iomem *addr) +{ + u32 low, high; + + low = ioread32(addr); + high = ioread32(addr + sizeof(u32)); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64_lo_hi +#define iowrite64_lo_hi iowrite64_lo_hi +static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32(val, addr); + iowrite32(val >> 32, addr + sizeof(u32)); +} +#endif + +#ifndef ioread64be_lo_hi +#define ioread64be_lo_hi ioread64be_lo_hi +static inline u64 ioread64be_lo_hi(void __iomem *addr) +{ + u32 low, high; + + low = ioread32be(addr + sizeof(u32)); + high = ioread32be(addr); + + return low + ((u64)high << 32); +} +#endif + +#ifndef iowrite64be_lo_hi +#define iowrite64be_lo_hi iowrite64be_lo_hi +static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) +{ + iowrite32be(val, addr + sizeof(u32)); + iowrite32be(val >> 32, addr); +} +#endif + +#ifndef ioread64 +#define ioread64_is_nonatomic +#define ioread64 ioread64_lo_hi +#endif + +#ifndef iowrite64 +#define iowrite64_is_nonatomic +#define iowrite64 iowrite64_lo_hi +#endif + +#ifndef ioread64be +#define ioread64be_is_nonatomic +#define ioread64be ioread64be_lo_hi +#endif + +#ifndef iowrite64be +#define iowrite64be_is_nonatomic +#define iowrite64be iowrite64be_lo_hi +#endif + #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ -- cgit v1.2.3-71-gd317 From ec74136ded792deed80780a2f8baf3521eeb72f9 Mon Sep 17 00:00:00 2001 From: Todd Kjos Date: Mon, 14 Jan 2019 09:10:21 -0800 Subject: binder: create node flag to request sender's security context To allow servers to verify client identity, allow a node flag to be set that causes the sender's security context to be delivered with the transaction. The BR_TRANSACTION command is extended in BR_TRANSACTION_SEC_CTX to contain a pointer to the security context string. 
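As a rough illustration of the new interface (a hedged sketch, not code from this patch; the helper name is made up and error handling is elided), a context manager could opt in using the BINDER_SET_CONTEXT_MGR_EXT ioctl and node flag added below:

    #include <sys/ioctl.h>
    #include <linux/android/binder.h>

    /* Hypothetical helper: ask the kernel for BR_TRANSACTION_SEC_CTX delivery. */
    static int become_secctx_ctx_mgr(int binder_fd)
    {
            struct flat_binder_object fbo = {
                    .flags = FLAT_BINDER_FLAG_TXN_SECURITY_CTX,
            };

            /* Older kernels lack the _EXT ioctl; fall back to the plain one. */
            if (ioctl(binder_fd, BINDER_SET_CONTEXT_MGR_EXT, &fbo) < 0)
                    return ioctl(binder_fd, BINDER_SET_CONTEXT_MGR, NULL);
            return 0;
    }

Transactions delivered to such a node then arrive as BR_TRANSACTION_SEC_CTX followed by a struct binder_transaction_data_secctx, whose secctx member points at the sender's security context string.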
Signed-off-by: Todd Kjos Reviewed-by: Joel Fernandes (Google) Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 106 ++++++++++++++++++++++++++++-------- include/uapi/linux/android/binder.h | 19 +++++++ 2 files changed, 102 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/drivers/android/binder.c b/drivers/android/binder.c index cdfc87629efb..5f6ef5e63b91 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -329,6 +329,8 @@ struct binder_error { * (invariant after initialized) * @min_priority: minimum scheduling priority * (invariant after initialized) + * @txn_security_ctx: require sender's security context + * (invariant after initialized) * @async_todo: list of async work items * (protected by @proc->inner_lock) * @@ -365,6 +367,7 @@ struct binder_node { * invariant after initialization */ u8 accept_fds:1; + u8 txn_security_ctx:1; u8 min_priority; }; bool has_async_transaction; @@ -615,6 +618,7 @@ struct binder_transaction { long saved_priority; kuid_t sender_euid; struct list_head fd_fixups; + binder_uintptr_t security_ctx; /** * @lock: protects @from, @to_proc, and @to_thread * @@ -1152,6 +1156,7 @@ static struct binder_node *binder_init_node_ilocked( node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); @@ -2778,6 +2783,8 @@ static void binder_transaction(struct binder_proc *proc, binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); + char *secctx = NULL; + u32 secctx_sz = 0; e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; @@ -3020,6 +3027,20 @@ static void binder_transaction(struct binder_proc *proc, t->flags = tr->flags; t->priority = task_nice(current); + if (target_node && target_node->txn_security_ctx) { + u32 secid; + + security_task_getsecid(proc->tsk, &secid); + ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); + if (ret) { + return_error = BR_FAILED_REPLY; + return_error_param = ret; + return_error_line = __LINE__; + goto err_get_secctx_failed; + } + extra_buffers_size += ALIGN(secctx_sz, sizeof(u64)); + } + trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, @@ -3036,6 +3057,19 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = NULL; goto err_binder_alloc_buf_failed; } + if (secctx) { + size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + + ALIGN(tr->offsets_size, sizeof(void *)) + + ALIGN(extra_buffers_size, sizeof(void *)) - + ALIGN(secctx_sz, sizeof(u64)); + char *kptr = t->buffer->data + buf_offset; + + t->security_ctx = (uintptr_t)kptr + + binder_alloc_get_user_buffer_offset(&target_proc->alloc); + memcpy(kptr, secctx, secctx_sz); + security_release_secctx(secctx, secctx_sz); + secctx = NULL; + } t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; @@ -3305,6 +3339,9 @@ err_copy_data_failed: t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: + if (secctx) + security_release_secctx(secctx, secctx_sz); +err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); 
err_alloc_tcomplete_failed: @@ -4036,11 +4073,13 @@ retry: while (1) { uint32_t cmd; - struct binder_transaction_data tr; + struct binder_transaction_data_secctx tr; + struct binder_transaction_data *trd = &tr.transaction_data; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; + size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) @@ -4240,8 +4279,8 @@ retry: if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; - tr.target.ptr = target_node->ptr; - tr.cookie = target_node->cookie; + trd->target.ptr = target_node->ptr; + trd->cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) @@ -4251,22 +4290,23 @@ retry: binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { - tr.target.ptr = 0; - tr.cookie = 0; + trd->target.ptr = 0; + trd->cookie = 0; cmd = BR_REPLY; } - tr.code = t->code; - tr.flags = t->flags; - tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid); + trd->code = t->code; + trd->flags = t->flags; + trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; - tr.sender_pid = task_tgid_nr_ns(sender, - task_active_pid_ns(current)); + trd->sender_pid = + task_tgid_nr_ns(sender, + task_active_pid_ns(current)); } else { - tr.sender_pid = 0; + trd->sender_pid = 0; } ret = binder_apply_fd_fixups(t); @@ -4297,15 +4337,20 @@ retry: } continue; } - tr.data_size = t->buffer->data_size; - tr.offsets_size = t->buffer->offsets_size; - tr.data.ptr.buffer = (binder_uintptr_t) + trd->data_size = t->buffer->data_size; + trd->offsets_size = t->buffer->offsets_size; + trd->data.ptr.buffer = (binder_uintptr_t) ((uintptr_t)t->buffer->data + binder_alloc_get_user_buffer_offset(&proc->alloc)); - tr.data.ptr.offsets = tr.data.ptr.buffer + + trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); + tr.secctx = t->security_ctx; + if (t->security_ctx) { + cmd = BR_TRANSACTION_SEC_CTX; + trsize = sizeof(tr); + } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4316,7 +4361,7 @@ retry: return -EFAULT; } ptr += sizeof(uint32_t); - if (copy_to_user(ptr, &tr, sizeof(tr))) { + if (copy_to_user(ptr, &tr, trsize)) { if (t_from) binder_thread_dec_tmpref(t_from); @@ -4325,7 +4370,7 @@ retry: return -EFAULT; } - ptr += sizeof(tr); + ptr += trsize; trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); @@ -4333,16 +4378,18 @@ retry: "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : - "BR_REPLY", + (cmd == BR_TRANSACTION_SEC_CTX) ? + "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? 
t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, - (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets); + (u64)trd->data.ptr.buffer, + (u64)trd->data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; - if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) { + if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; @@ -4690,7 +4737,8 @@ out: return ret; } -static int binder_ioctl_set_ctx_mgr(struct file *filp) +static int binder_ioctl_set_ctx_mgr(struct file *filp, + struct flat_binder_object *fbo) { int ret = 0; struct binder_proc *proc = filp->private_data; @@ -4719,7 +4767,7 @@ static int binder_ioctl_set_ctx_mgr(struct file *filp) } else { context->binder_context_mgr_uid = curr_euid; } - new_node = binder_new_node(proc, NULL); + new_node = binder_new_node(proc, fbo); if (!new_node) { ret = -ENOMEM; goto out; @@ -4842,8 +4890,20 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) binder_inner_proc_unlock(proc); break; } + case BINDER_SET_CONTEXT_MGR_EXT: { + struct flat_binder_object fbo; + + if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { + ret = -EINVAL; + goto err; + } + ret = binder_ioctl_set_ctx_mgr(filp, &fbo); + if (ret) + goto err; + break; + } case BINDER_SET_CONTEXT_MGR: - ret = binder_ioctl_set_ctx_mgr(filp); + ret = binder_ioctl_set_ctx_mgr(filp, NULL); if (ret) goto err; break; diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index b9ba520f7e4b..2832134e5397 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -41,6 +41,14 @@ enum { enum { FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff, FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100, + + /** + * @FLAT_BINDER_FLAG_TXN_SECURITY_CTX: request security contexts + * + * Only when set, causes senders to include their security + * context + */ + FLAT_BINDER_FLAG_TXN_SECURITY_CTX = 0x1000, }; #ifdef BINDER_IPC_32BIT @@ -218,6 +226,7 @@ struct binder_node_info_for_ref { #define BINDER_VERSION _IOWR('b', 9, struct binder_version) #define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) #define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) +#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) /* * NOTE: Two special error codes you should check for when calling @@ -276,6 +285,11 @@ struct binder_transaction_data { } data; }; +struct binder_transaction_data_secctx { + struct binder_transaction_data transaction_data; + binder_uintptr_t secctx; +}; + struct binder_transaction_data_sg { struct binder_transaction_data transaction_data; binder_size_t buffers_size; @@ -311,6 +325,11 @@ enum binder_driver_return_protocol { BR_OK = _IO('r', 1), /* No parameters! */ + BR_TRANSACTION_SEC_CTX = _IOR('r', 2, + struct binder_transaction_data_secctx), + /* + * binder_transaction_data_secctx: the received command. + */ BR_TRANSACTION = _IOR('r', 2, struct binder_transaction_data), BR_REPLY = _IOR('r', 3, struct binder_transaction_data), /* -- cgit v1.2.3-71-gd317 From e11a5795cb7cd1e25bbd1697baa109943938c0f6 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 5 Feb 2019 16:24:56 -0700 Subject: perf/aux: Make perf_event accessible to setup_aux() When pmu::setup_aux() is called the coresight PMU needs to know which sink to use for the session by looking up the information in the event's attr::config2 field. 
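For orientation, a hedged sketch of the callback shape this series moves to (not taken from any driver; the function name is made up):

    static void *example_setup_aux(struct perf_event *event, void **pages,
                                   int nr_pages, bool snapshot)
    {
            int cpu = event->cpu;              /* all the old hook received */
            u64 sink = event->attr.config2;    /* now also reachable */

            /* ... allocate and return the PMU-private AUX buffer ... */
            return NULL;                       /* sketch only */
    }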
As such simply replace the cpu information with the complete perf_event structure and update all affected callers. Signed-off-by: Mathieu Poirier Reviewed-by: Suzuki K Poulose Acked-by: Peter Zijlstra (Intel) Signed-off-by: Greg Kroah-Hartman --- arch/s390/kernel/perf_cpum_sf.c | 6 +++--- arch/x86/events/intel/bts.c | 4 +++- arch/x86/events/intel/pt.c | 5 +++-- drivers/hwtracing/coresight/coresight-etm-perf.c | 6 +++--- drivers/perf/arm_spe_pmu.c | 6 +++--- include/linux/perf_event.h | 2 +- kernel/events/ring_buffer.c | 2 +- 7 files changed, 17 insertions(+), 14 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index bfabeb1889cc..1266194afb02 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1600,7 +1600,7 @@ static void aux_sdb_init(unsigned long sdb) /* * aux_buffer_setup() - Setup AUX buffer for diagnostic mode sampling - * @cpu: On which to allocate, -1 means current + * @event: Event the buffer is set up for, event->cpu == -1 means current * @pages: Array of pointers to buffer pages passed from perf core * @nr_pages: Total pages * @snapshot: Flag for snapshot mode @@ -1612,8 +1612,8 @@ static void aux_sdb_init(unsigned long sdb) * * Return the private AUX buffer structure if success or NULL if fails. */ -static void *aux_buffer_setup(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *aux_buffer_setup(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct sf_buffer *sfb; struct aux_buffer *aux; diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c index a01ef1b0f883..7cdd7b13bbda 100644 --- a/arch/x86/events/intel/bts.c +++ b/arch/x86/events/intel/bts.c @@ -77,10 +77,12 @@ static size_t buf_size(struct page *page) } static void * -bts_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool overwrite) +bts_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool overwrite) { struct bts_buffer *buf; struct page *page; + int cpu = event->cpu; int node = (cpu == -1) ? cpu : cpu_to_node(cpu); unsigned long offset; size_t size = nr_pages << PAGE_SHIFT; diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 9494ca68fd9d..c0e86ff21f81 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1114,10 +1114,11 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, unsigned long nr_pages, * Return: Our private PT buffer structure.
*/ static void * -pt_buffer_setup_aux(int cpu, void **pages, int nr_pages, bool snapshot) +pt_buffer_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { struct pt_buffer *buf; - int node, ret; + int node, ret, cpu = event->cpu; if (!nr_pages) return NULL; diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index abe8249b893b..f21eb28b6782 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -177,15 +177,15 @@ static void etm_free_aux(void *data) schedule_work(&event_data->work); } -static void *etm_setup_aux(int event_cpu, void **pages, +static void *etm_setup_aux(struct perf_event *event, void **pages, int nr_pages, bool overwrite) { - int cpu; + int cpu = event->cpu; cpumask_t *mask; struct coresight_device *sink; struct etm_event_data *event_data = NULL; - event_data = alloc_event_data(event_cpu); + event_data = alloc_event_data(cpu); if (!event_data) return NULL; INIT_WORK(&event_data->work, free_event_data); diff --git a/drivers/perf/arm_spe_pmu.c b/drivers/perf/arm_spe_pmu.c index 8e46a9dad2fa..7cb766dafe85 100644 --- a/drivers/perf/arm_spe_pmu.c +++ b/drivers/perf/arm_spe_pmu.c @@ -824,10 +824,10 @@ static void arm_spe_pmu_read(struct perf_event *event) { } -static void *arm_spe_pmu_setup_aux(int cpu, void **pages, int nr_pages, - bool snapshot) +static void *arm_spe_pmu_setup_aux(struct perf_event *event, void **pages, + int nr_pages, bool snapshot) { - int i; + int i, cpu = event->cpu; struct page **pglist; struct arm_spe_pmu_buf *buf; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..3e49b2144808 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -409,7 +409,7 @@ struct pmu { /* * Set up pmu-private data structures for an AUX area */ - void *(*setup_aux) (int cpu, void **pages, + void *(*setup_aux) (struct perf_event *event, void **pages, int nr_pages, bool overwrite); /* optional */ diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index 4a9937076331..857308295f63 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -658,7 +658,7 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event, goto out; } - rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages, + rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages, overwrite); if (!rb->aux_priv) goto out; -- cgit v1.2.3-71-gd317 From 988036f9d322cbd787d8f6a776dbe903d05bae22 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Tue, 5 Feb 2019 16:24:57 -0700 Subject: coresight: perf: Add "sinks" group to PMU directory Add a "sinks" directory entry so that users can see all the sinks available in the system in a single place. Individual sinks are added as they are registered with the coresight bus.
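To make the new group concrete, a hedged sketch of how a userspace tool might consume it (the sysfs path and helper name are illustrative assumptions; per the previous patch, the coresight PMU reads the selected sink back out of the event's attr.config2):

    #include <stdio.h>
    #include <linux/perf_event.h>

    static int example_select_sink(struct perf_event_attr *attr, const char *sink)
    {
            char path[128];
            unsigned long hash;
            FILE *f;

            snprintf(path, sizeof(path),
                     "/sys/bus/event_source/devices/cs_etm/sinks/%s", sink);
            f = fopen(path, "r");
            if (!f)
                    return -1;
            if (fscanf(f, "0x%lx", &hash) != 1) {
                    fclose(f);
                    return -1;
            }
            fclose(f);

            attr->config2 = hash;   /* sink identifier for this session */
            return 0;
    }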
Signed-off-by: Mathieu Poirier Acked-by: Peter Zijlstra (Intel) Reviewed-by: Suzuki K Poulose Signed-off-by: Greg Kroah-Hartman --- drivers/hwtracing/coresight/coresight-etm-perf.c | 82 ++++++++++++++++++++++++ drivers/hwtracing/coresight/coresight-etm-perf.h | 6 +- drivers/hwtracing/coresight/coresight.c | 18 ++++++ include/linux/coresight.h | 7 +- 4 files changed, 110 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index f21eb28b6782..cdbdb28dc175 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -43,8 +44,18 @@ static const struct attribute_group etm_pmu_format_group = { .attrs = etm_config_formats_attr, }; +static struct attribute *etm_config_sinks_attr[] = { + NULL, +}; + +static const struct attribute_group etm_pmu_sinks_group = { + .name = "sinks", + .attrs = etm_config_sinks_attr, +}; + static const struct attribute_group *etm_pmu_attr_groups[] = { &etm_pmu_format_group, + &etm_pmu_sinks_group, NULL, }; @@ -479,6 +490,77 @@ int etm_perf_symlink(struct coresight_device *csdev, bool link) return 0; } +static ssize_t etm_perf_sink_name_show(struct device *dev, + struct device_attribute *dattr, + char *buf) +{ + struct dev_ext_attribute *ea; + + ea = container_of(dattr, struct dev_ext_attribute, attr); + return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var)); +} + +int etm_perf_add_symlink_sink(struct coresight_device *csdev) +{ + int ret; + unsigned long hash; + const char *name; + struct device *pmu_dev = etm_pmu.dev; + struct device *pdev = csdev->dev.parent; + struct dev_ext_attribute *ea; + + if (csdev->type != CORESIGHT_DEV_TYPE_SINK && + csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) + return -EINVAL; + + if (csdev->ea != NULL) + return -EINVAL; + + if (!etm_perf_up) + return -EPROBE_DEFER; + + ea = devm_kzalloc(pdev, sizeof(*ea), GFP_KERNEL); + if (!ea) + return -ENOMEM; + + name = dev_name(pdev); + /* See function coresight_get_sink_by_id() to know where this is used */ + hash = hashlen_hash(hashlen_string(NULL, name)); + + ea->attr.attr.name = devm_kstrdup(pdev, name, GFP_KERNEL); + if (!ea->attr.attr.name) + return -ENOMEM; + + ea->attr.attr.mode = 0444; + ea->attr.show = etm_perf_sink_name_show; + ea->var = (unsigned long *)hash; + + ret = sysfs_add_file_to_group(&pmu_dev->kobj, + &ea->attr.attr, "sinks"); + + if (!ret) + csdev->ea = ea; + + return ret; +} + +void etm_perf_del_symlink_sink(struct coresight_device *csdev) +{ + struct device *pmu_dev = etm_pmu.dev; + struct dev_ext_attribute *ea = csdev->ea; + + if (csdev->type != CORESIGHT_DEV_TYPE_SINK && + csdev->type != CORESIGHT_DEV_TYPE_LINKSINK) + return; + + if (!ea) + return; + + sysfs_remove_file_from_group(&pmu_dev->kobj, + &ea->attr.attr, "sinks"); + csdev->ea = NULL; +} + static int __init etm_perf_init(void) { int ret; diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.h b/drivers/hwtracing/coresight/coresight-etm-perf.h index da7d9336a15c..015213abe00a 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.h +++ b/drivers/hwtracing/coresight/coresight-etm-perf.h @@ -59,6 +59,8 @@ struct etm_event_data { #ifdef CONFIG_CORESIGHT int etm_perf_symlink(struct coresight_device *csdev, bool link); +int etm_perf_add_symlink_sink(struct coresight_device *csdev); +void etm_perf_del_symlink_sink(struct coresight_device *csdev); static 
inline void *etm_perf_sink_config(struct perf_output_handle *handle) { struct etm_event_data *data = perf_get_aux(handle); @@ -70,7 +72,9 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle) #else static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) { return -EINVAL; } - +static inline int etm_perf_add_symlink_sink(struct coresight_device *csdev) +{ return -EINVAL; } +static inline void etm_perf_del_symlink_sink(struct coresight_device *csdev) {} static inline void *etm_perf_sink_config(struct perf_output_handle *handle) { return NULL; diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c index 2b0df1a0a8df..d7fa90be6f42 100644 --- a/drivers/hwtracing/coresight/coresight.c +++ b/drivers/hwtracing/coresight/coresight.c @@ -18,6 +18,7 @@ #include #include +#include "coresight-etm-perf.h" #include "coresight-priv.h" static DEFINE_MUTEX(coresight_mutex); @@ -1167,6 +1168,22 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) goto err_out; } + if (csdev->type == CORESIGHT_DEV_TYPE_SINK || + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { + ret = etm_perf_add_symlink_sink(csdev); + + if (ret) { + device_unregister(&csdev->dev); + /* + * As with the above, all resources are free'd + * explicitly via coresight_device_release() triggered + * from put_device(), which is in turn called from + * function device_unregister(). + */ + goto err_out; + } + } + mutex_lock(&coresight_mutex); coresight_fixup_device_conns(csdev); @@ -1185,6 +1202,7 @@ EXPORT_SYMBOL_GPL(coresight_register); void coresight_unregister(struct coresight_device *csdev) { + etm_perf_del_symlink_sink(csdev); /* Remove references of that device in the topology */ coresight_remove_conns(csdev); device_unregister(&csdev->dev); diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 46c67a764877..7b87965f7a65 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -154,8 +154,9 @@ struct coresight_connection { * @orphan: true if the component has connections that haven't been linked. * @enable: 'true' if component is currently part of an active path. * @activated: 'true' only if a _sink_ has been activated. A sink can be - activated but not yet enabled. Enabling for a _sink_ - happens when a source has been selected for that it. + * activated but not yet enabled. Enabling for a _sink_ + * happens when a source has been selected for it. + * @ea: Device attribute for sink representation under PMU directory. */ struct coresight_device { struct coresight_connection *conns; @@ -168,7 +169,9 @@ struct coresight_device { atomic_t *refcnt; bool orphan; bool enable; /* true only if configured as part of a path */ + /* sink specific fields */ bool activated; /* true only if a sink is part of a path */ + struct dev_ext_attribute *ea; }; #define to_coresight_device(d) container_of(d, struct coresight_device, dev) -- cgit v1.2.3-71-gd317 From 4d69c80e0d0fd8cf12d985841eb0fce5c29819ad Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 8 Feb 2019 00:27:56 +0100 Subject: component: Add documentation While typing these I think doing an s/component_master/aggregate/ would be useful: - it's shorter :-) - I think component/aggregate is much more meaningful naming than component/puppetmaster or something like that. At least to my English ear "aggregate" emphasizes much more the "assemble a pile of things into something bigger" aspect, and there's not really much of a control hierarchy between aggregate and constituting components.
But that's way more than a quick doc typing exercise ... Thanks to Ram for commenting on an initial draft of these docs. v2: Review from Rafael: - git add Documentation/driver-api/component.rst - lots of polish to the wording + spelling fixes. v3: Review from Russell: - s/framework/helper - clarify the documentation for component_match_add functions. v4: Remove a few superfluous "This". Reviewed-by: Rafael J. Wysocki Cc: "C, Ramalingam" Cc: Greg Kroah-Hartman Cc: Russell King Cc: Rafael J. Wysocki Cc: Jaroslav Kysela Cc: Takashi Iwai Cc: Rodrigo Vivi Cc: Jani Nikula Reviewed-by: Greg Kroah-Hartman Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20190207232759.14553-1-daniel.vetter@ffwll.ch --- Documentation/driver-api/component.rst | 17 +++++ Documentation/driver-api/device_link.rst | 3 + Documentation/driver-api/index.rst | 1 + drivers/base/component.c | 106 ++++++++++++++++++++++++++++++- include/linux/component.h | 70 ++++++++++++++++++++ 5 files changed, 194 insertions(+), 3 deletions(-) create mode 100644 Documentation/driver-api/component.rst (limited to 'include') diff --git a/Documentation/driver-api/component.rst b/Documentation/driver-api/component.rst new file mode 100644 index 000000000000..2da4a8f20607 --- /dev/null +++ b/Documentation/driver-api/component.rst @@ -0,0 +1,17 @@ +====================================== +Component Helper for Aggregate Drivers +====================================== + +.. kernel-doc:: drivers/base/component.c + :doc: overview + + +API +=== + +.. kernel-doc:: include/linux/component.h + :internal: + +.. kernel-doc:: drivers/base/component.c + :export: + diff --git a/Documentation/driver-api/device_link.rst b/Documentation/driver-api/device_link.rst index d6763272e747..2d5919b2b337 100644 --- a/Documentation/driver-api/device_link.rst +++ b/Documentation/driver-api/device_link.rst @@ -1,6 +1,9 @@ .. |struct dev_pm_domain| replace:: :c:type:`struct dev_pm_domain ` .. |struct generic_pm_domain| replace:: :c:type:`struct generic_pm_domain ` + +.. _device_link: + ============ Device links ============ diff --git a/Documentation/driver-api/index.rst b/Documentation/driver-api/index.rst index ab38ced66a44..c0b600ed9961 100644 --- a/Documentation/driver-api/index.rst +++ b/Documentation/driver-api/index.rst @@ -22,6 +22,7 @@ available subsections can be seen below. device_connection dma-buf device_link + component message-based sound frame-buffer diff --git a/drivers/base/component.c b/drivers/base/component.c index ddcea8739c12..1624c2a892a5 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -16,6 +16,32 @@ #include #include +/** + * DOC: overview + * + * The component helper allows drivers to collect a pile of sub-devices, + * including their bound drivers, into an aggregate driver. Various subsystems + * already provide functions to get hold of such components, e.g. + * of_clk_get_by_name(). The component helper can be used when such a + * subsystem-specific way to find a device is not available: The component + * helper fills the niche of aggregate drivers for specific hardware, where + * further standardization into a subsystem would not be practical. The common + * example is when a logical device (e.g. a DRM display driver) is spread around + * the SoC on various components (scanout engines, blending blocks, transcoders + * for various outputs and so on). + * + * The component helper also doesn't solve runtime dependencies, e.g. for system + * suspend and resume operations.
See also :ref:`device links`. + * + * Components are registered using component_add() and unregistered with + * component_del(), usually from the driver's probe and disconnect functions. + * + * Aggregate drivers first assemble a component match list of what they need + * using component_match_add(). This is then registered as an aggregate driver + * using component_master_add_with_match(), and unregistered using + * component_master_del(). + */ + struct component; struct component_match_array { @@ -301,10 +327,24 @@ static int component_match_realloc(struct device *dev, return 0; } -/* - * Add a component to be matched, with a release function. +/** + * component_match_add_release - add a component match with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. + * + * The allocated match list in @matchptr is automatically released using devm + * actions, where upon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). * - * The match array is first created or extended if necessary. + * See also component_match_add(). */ void component_match_add_release(struct device *master, struct component_match **matchptr, @@ -367,6 +407,18 @@ static void free_master(struct master *master) kfree(master); } +/** + * component_master_add_with_match - register an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * @match: component match list for the aggregate driver + * + * Registers a new aggregate driver consisting of the components added to @match + * by calling one of the component_match_add() functions. Once all components in + * @match are available, it will be assembled by calling + * &component_master_ops.bind from @ops. Must be unregistered by calling + * component_master_del(). + */ int component_master_add_with_match(struct device *dev, const struct component_master_ops *ops, struct component_match *match) @@ -403,6 +455,15 @@ int component_master_add_with_match(struct device *dev, } EXPORT_SYMBOL_GPL(component_master_add_with_match); +/** + * component_master_del - unregister an aggregate driver + * @dev: device with the aggregate driver + * @ops: callbacks for the aggregate driver + * + * Unregisters an aggregate driver registered with + * component_master_add_with_match(). If necessary the aggregate driver is first + * disassembled by calling &component_master_ops.unbind from @ops. + */ void component_master_del(struct device *dev, const struct component_master_ops *ops) { @@ -430,6 +491,15 @@ static void component_unbind(struct component *component, devres_release_group(component->dev, component); } +/** + * component_unbind_all - unbind all components of an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Unbinds all components of the aggregate @dev by passing @data to their + * &component_ops.unbind functions. Should be called from + * &component_master_ops.unbind.
+ */ void component_unbind_all(struct device *master_dev, void *data) { struct master *master; @@ -503,6 +573,15 @@ static int component_bind(struct component *component, struct master *master, return ret; } +/** + * component_bind_all - bind all components to an aggregate driver + * @master_dev: device with the aggregate driver + * @data: opaque pointer, passed to all components + * + * Binds all components to the aggregate @dev by passing @data to their + * &component_ops.bind functions. Should be called from + * &component_master_ops.bind. + */ int component_bind_all(struct device *master_dev, void *data) { struct master *master; @@ -537,6 +616,18 @@ int component_bind_all(struct device *master_dev, void *data) } EXPORT_SYMBOL_GPL(component_bind_all); +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by calling + * component_del(). + */ int component_add(struct device *dev, const struct component_ops *ops) { struct component *component; @@ -568,6 +659,15 @@ int component_add(struct device *dev, const struct component_ops *ops) } EXPORT_SYMBOL_GPL(component_add); +/** + * component_del - unregister a component + * @dev: component device + * @ops: component callbacks + * + * Unregister a component added with component_add(). If the component is bound + * into an aggregate driver, this will force the entire aggregate driver, including + * all its components, to be unbound. + */ void component_del(struct device *dev, const struct component_ops *ops) { struct component *c, *component = NULL; diff --git a/include/linux/component.h b/include/linux/component.h index e71fbbbc74e2..83da25bdf59c 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -4,11 +4,31 @@ #include + struct device; +/** + * struct component_ops - callbacks for component drivers + * + * Components are registered with component_add() and unregistered with + * component_del(). + */ struct component_ops { + /** + * @bind: + * + * Called through component_bind_all() when the aggregate driver is + * ready to bind the overall driver. + */ int (*bind)(struct device *comp, struct device *master, void *master_data); + /** + * @unbind: + * + * Called through component_unbind_all() when the aggregate driver is + * ready to unbind the overall driver, or when component_bind_all() fails + * partway through and needs to unbind some already bound components. + */ void (*unbind)(struct device *comp, struct device *master, void *master_data); }; @@ -21,8 +41,42 @@ void component_unbind_all(struct device *master, void *master_data); struct master; +/** + * struct component_master_ops - callback for the aggregate driver + * + * Aggregate drivers are registered with component_master_add_with_match() and + * unregistered with component_master_del(). + */ struct component_master_ops { + /** + * @bind: + * + * Called when all components of the aggregate driver, as specified in + * the match list passed to component_master_add_with_match(), are + * ready. Usually there are 3 steps to bind an aggregate driver: + * + * 1. Allocate a structure for the aggregate driver. + * + * 2.
Bind all components to the aggregate driver by calling + * component_bind_all() with the aggregate driver structure as opaque + * pointer data. + * + * 3. Register the aggregate driver with the subsystem to publish its + * interfaces. + * + * Note that the lifetime of the aggregate driver does not align with + * any of the underlying &struct device instances. Therefore devm cannot + * be used and all resources acquired or allocated in this callback must + * be explicitly released in the @unbind callback. + */ int (*bind)(struct device *master); + /** + * @unbind: + * + * Called when either the aggregate driver, using + * component_master_del(), or one of its components, using + * component_del(), is unregistered. + */ void (*unbind)(struct device *master); }; @@ -38,6 +92,22 @@ void component_match_add_release(struct device *master, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); +/** + * component_match_add - add a component match + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add_release(). + */ static inline void component_match_add(struct device *master, struct component_match **matchptr, int (*compare)(struct device *, void *), void *compare_data) -- cgit v1.2.3-71-gd317 From 3521ee994bca90c57b539e106ff7e12a839aa8ea Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 8 Feb 2019 00:27:57 +0100 Subject: components: multiple components for a device The component framework is extended to support multiple components for a struct device. These will be matched with different masters based on their subcomponent value. We are introducing this, as I915 needs two different components with different subcomponent values, which will be matched to two different component masters (Audio and HDCP) based on the subcomponent values. v2: Add documentation. v3: Rebase on top of updated documentation. v4: Review from Rafael: - Remove redundant "This" from kerneldoc (also in the previous patch) - Streamline the logic in find_component() a bit. Signed-off-by: Daniel Vetter (v1 code) Signed-off-by: Ramalingam C (v1 commit message) Cc: Ramalingam C Cc: Greg Kroah-Hartman Cc: Russell King Cc: Rafael J. Wysocki Cc: Jaroslav Kysela Cc: Takashi Iwai Cc: Rodrigo Vivi Cc: Jani Nikula Reviewed-by: Greg Kroah-Hartman Reviewed-by: Rafael J. Wysocki
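A hedged sketch of the pairing this enables (not from the patch; all names here are made up, and the ops bodies are elided):

    #include <linux/component.h>
    #include <linux/device.h>

    #define EXAMPLE_SUBCOMP_AUDIO 1                 /* must be nonzero */

    static const struct component_ops example_audio_ops;        /* bodies elided */
    static const struct component_master_ops example_master_ops;

    static int example_match_audio(struct device *dev, int subcomponent,
                                   void *data)
    {
            /* Pair only with the audio subcomponent of the matched device. */
            return subcomponent == EXAMPLE_SUBCOMP_AUDIO;
    }

    static int example_setup(struct device *comp_dev, struct device *master_dev)
    {
            struct component_match *match = NULL;
            int ret;

            /* Component side: one of possibly several typed registrations. */
            ret = component_add_typed(comp_dev, &example_audio_ops,
                                      EXAMPLE_SUBCOMP_AUDIO);
            if (ret)
                    return ret;

            /* Aggregate side: match only that subcomponent. */
            component_match_add_typed(master_dev, &match, example_match_audio,
                                      NULL);
            return component_master_add_with_match(master_dev,
                                                   &example_master_ops, match);
    }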
Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20190207232759.14553-2-daniel.vetter@ffwll.ch --- drivers/base/component.c | 158 +++++++++++++++++++++++++++++++++++----------- include/linux/component.h | 10 ++- 2 files changed, 129 insertions(+), 39 deletions(-) (limited to 'include') diff --git a/drivers/base/component.c b/drivers/base/component.c index 1624c2a892a5..7dbc41cccd58 100644 --- a/drivers/base/component.c +++ b/drivers/base/component.c @@ -47,6 +47,7 @@ struct component; struct component_match_array { void *data; int (*compare)(struct device *, void *); + int (*compare_typed)(struct device *, int, void *); void (*release)(struct device *, void *); struct component *component; bool duplicate; @@ -74,6 +75,7 @@ struct component { bool bound; const struct component_ops *ops; + int subcomponent; struct device *dev; }; @@ -158,7 +160,7 @@ static struct master *__master_find(struct device *dev, } static struct component *find_component(struct master *master, - int (*compare)(struct device *, void *), void *compare_data) + struct component_match_array *mc) { struct component *c; @@ -166,7 +168,11 @@ static struct component *find_component(struct master *master, if (c->master && c->master != master) continue; - if (compare(c->dev, compare_data)) + if (mc->compare && mc->compare(c->dev, mc->data)) + return c; + + if (mc->compare_typed && + mc->compare_typed(c->dev, c->subcomponent, mc->data)) return c; } @@ -192,7 +198,7 @@ static int find_components(struct master *master) if (match->compare[i].component) continue; - c = find_component(master, mc->compare, mc->data); + c = find_component(master, mc); if (!c) { ret = -ENXIO; break; } @@ -327,29 +333,12 @@ static int component_match_realloc(struct device *dev, return 0; } -/** - * component_match_add_release - add a component match with release callback - * @master: device with the aggregate driver - * @matchptr: pointer to the list of component matches - * @release: release function for @compare_data - * @compare: compare function to match against all components - * @compare_data: opaque pointer passed to the @compare function - * - * Adds a new component match to the list stored in @matchptr, which the @master - * aggregate driver needs to function. The list of component matches pointed to - * by @matchptr must be initialized to NULL before adding the first match. - * - * The allocated match list in @matchptr is automatically released using devm - * actions, where upon @release will be called to free any references held by - * @compare_data, e.g. when @compare_data is a &device_node that must be - * released with of_node_put(). - * - * See also component_match_add().
- */ -void component_match_add_release(struct device *master, +static void __component_match_add(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), - int (*compare)(struct device *, void *), void *compare_data) + int (*compare)(struct device *, void *), + int (*compare_typed)(struct device *, int, void *), + void *compare_data) { struct component_match *match = *matchptr; @@ -381,13 +370,69 @@ void component_match_add_release(struct device *master, } match->compare[match->num].compare = compare; + match->compare[match->num].compare_typed = compare_typed; match->compare[match->num].release = release; match->compare[match->num].data = compare_data; match->compare[match->num].component = NULL; match->num++; } + +/** + * component_match_add_release - add a component match with release callback + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @release: release function for @compare_data + * @compare: compare function to match against all components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions, where upon @release will be called to free any references held by + * @compare_data, e.g. when @compare_data is a &device_node that must be + * released with of_node_put(). + * + * See also component_match_add() and component_match_add_typed(). + */ +void component_match_add_release(struct device *master, + struct component_match **matchptr, + void (*release)(struct device *, void *), + int (*compare)(struct device *, void *), void *compare_data) +{ + __component_match_add(master, matchptr, release, compare, NULL, + compare_data); +} EXPORT_SYMBOL(component_match_add_release); +/** + * component_match_add_typed - add a component match for a typed component + * @master: device with the aggregate driver + * @matchptr: pointer to the list of component matches + * @compare_typed: compare function to match against all typed components + * @compare_data: opaque pointer passed to the @compare function + * + * Adds a new component match to the list stored in @matchptr, which the @master + * aggregate driver needs to function. The list of component matches pointed to + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add_typed(). + * + * The allocated match list in @matchptr is automatically released using devm + * actions. + * + * See also component_match_add_release() and component_match_add().
+ */ +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data) +{ + __component_match_add(master, matchptr, NULL, NULL, compare_typed, + compare_data); +} +EXPORT_SYMBOL(component_match_add_typed); + static void free_master(struct master *master) { struct component_match *match = master->match; @@ -616,19 +661,8 @@ int component_bind_all(struct device *master_dev, void *data) } EXPORT_SYMBOL_GPL(component_bind_all); -/** - * component_add - register a component - * @dev: component device - * @ops: component callbacks - * - * Register a new component for @dev. Functions in @ops will be called when the - * aggregate driver is ready to bind the overall driver by calling - * component_bind_all(). See also &struct component_ops. - * - * The component needs to be unregistered at driver unload/disconnect by calling - * component_del(). - */ -int component_add(struct device *dev, const struct component_ops *ops) +static int __component_add(struct device *dev, const struct component_ops *ops, + int subcomponent) { struct component *component; int ret; @@ -639,6 +673,7 @@ int component_add(struct device *dev, const struct component_ops *ops) component->ops = ops; component->dev = dev; + component->subcomponent = subcomponent; dev_dbg(dev, "adding component (ops %ps)\n", ops); @@ -657,6 +692,55 @@ int component_add(struct device *dev, const struct component_ops *ops) return ret < 0 ? ret : 0; } + +/** + * component_add_typed - register a component + * @dev: component device + * @ops: component callbacks + * @subcomponent: nonzero identifier for subcomponents + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * @subcomponent must be nonzero and is used to differentiate between multiple + * components registered on the same device @dev. These components are matched + * using component_match_add_typed(). + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add(). + */ +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent) +{ + if (WARN_ON(subcomponent == 0)) + return -EINVAL; + + return __component_add(dev, ops, subcomponent); +} +EXPORT_SYMBOL_GPL(component_add_typed); + +/** + * component_add - register a component + * @dev: component device + * @ops: component callbacks + * + * Register a new component for @dev. Functions in @ops will be called when the + * aggregate driver is ready to bind the overall driver by calling + * component_bind_all(). See also &struct component_ops. + * + * The component needs to be unregistered at driver unload/disconnect by + * calling component_del(). + * + * See also component_add_typed() for a variant that allows multiple different + * components on the same device.
+ */ +int component_add(struct device *dev, const struct component_ops *ops) +{ + return __component_add(dev, ops, 0); +} EXPORT_SYMBOL_GPL(component_add); /** diff --git a/include/linux/component.h b/include/linux/component.h index 83da25bdf59c..30bcc7e590eb 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -34,6 +34,8 @@ struct component_ops { }; int component_add(struct device *, const struct component_ops *); +int component_add_typed(struct device *dev, const struct component_ops *ops, + int subcomponent); void component_del(struct device *, const struct component_ops *); int component_bind_all(struct device *master, void *master_data); @@ -91,6 +93,9 @@ void component_match_add_release(struct device *master, struct component_match **matchptr, void (*release)(struct device *, void *), int (*compare)(struct device *, void *), void *compare_data); +void component_match_add_typed(struct device *master, + struct component_match **matchptr, + int (*compare_typed)(struct device *, int, void *), void *compare_data); /** * component_match_add - add a component match * @master: device with the aggregate driver * @matchptr: pointer to the list of component matches * @compare: compare function to match against all components * @compare_data: opaque pointer passed to the @compare function * * Adds a new component match to the list stored in @matchptr, which the @master * aggregate driver needs to function. The list of component matches pointed to - * by @matchptr must be initialized to NULL before adding the first match. + * by @matchptr must be initialized to NULL before adding the first match. This + * only matches against components added with component_add(). * * The allocated match list in @matchptr is automatically released using devm * actions. * - * See also component_match_add_release(). + * See also component_match_add_release() and component_match_add_typed(). */ static inline void component_match_add(struct device *master, struct component_match **matchptr, -- cgit v1.2.3-71-gd317 From 8857c7d065e900a0b3829c97634c99501b606541 Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Fri, 8 Feb 2019 00:27:59 +0100 Subject: i915/snd_hdac: I915 subcomponent for the snd_hdac Since we need multiple components for I915 for different purposes (Audio & Mei_hdcp), we adopt the subcomponents methodology introduced by the previous patch (mentioned below). Author: Daniel Vetter Date: Mon Jan 28 17:08:20 2019 +0530 components: multiple components for a device Reviewed-by: Takashi Iwai Signed-off-by: Ramalingam C (commit message) Signed-off-by: Daniel Vetter (code) cc: Greg Kroah-Hartman cc: Russell King cc: Rafael J.
Wysocki cc: Jaroslav Kysela cc: Takashi Iwai cc: Rodrigo Vivi cc: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20190207232759.14553-4-daniel.vetter@ffwll.ch --- drivers/gpu/drm/i915/intel_audio.c | 4 +++- include/drm/i915_component.h | 4 ++++ include/sound/hda_component.h | 5 +++-- sound/hda/hdac_component.c | 4 ++-- sound/hda/hdac_i915.c | 6 ++++-- 5 files changed, 16 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index ae55a6865d5c..b32681632f30 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -984,7 +984,9 @@ void i915_audio_component_init(struct drm_i915_private *dev_priv) { int ret; - ret = component_add(dev_priv->drm.dev, &i915_audio_component_bind_ops); + ret = component_add_typed(dev_priv->drm.dev, + &i915_audio_component_bind_ops, + I915_COMPONENT_AUDIO); if (ret < 0) { DRM_ERROR("failed to add audio component (%d)\n", ret); /* continue with reduced functionality */ diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index fca22d463e1b..72fbb037f9b3 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -26,6 +26,10 @@ #include "drm_audio_component.h" +enum i915_component_type { + I915_COMPONENT_AUDIO = 1, +}; + /* MAX_PORT is the number of port * It must be sync with I915_MAX_PORTS defined i915_drv.h */ diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h index 2ec31b358950..d4804c72d959 100644 --- a/include/sound/hda_component.h +++ b/include/sound/hda_component.h @@ -20,7 +20,7 @@ int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id, bool *audio_enabled, char *buffer, int max_bytes); int snd_hdac_acomp_init(struct hdac_bus *bus, const struct drm_audio_component_audio_ops *aops, - int (*match_master)(struct device *, void *), + int (*match_master)(struct device *, int, void *), size_t extra_size); int snd_hdac_acomp_exit(struct hdac_bus *bus); int snd_hdac_acomp_register_notifier(struct hdac_bus *bus, @@ -47,7 +47,8 @@ static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t ni } static inline int snd_hdac_acomp_init(struct hdac_bus *bus, const struct drm_audio_component_audio_ops *aops, - int (*match_master)(struct device *, void *), + int (*match_master)(struct device *, + int, void *), size_t extra_size) { return -ENODEV; diff --git a/sound/hda/hdac_component.c b/sound/hda/hdac_component.c index a6d37b9d6413..5c95933e739a 100644 --- a/sound/hda/hdac_component.c +++ b/sound/hda/hdac_component.c @@ -269,7 +269,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_acomp_register_notifier); */ int snd_hdac_acomp_init(struct hdac_bus *bus, const struct drm_audio_component_audio_ops *aops, - int (*match_master)(struct device *, void *), + int (*match_master)(struct device *, int, void *), size_t extra_size) { struct component_match *match = NULL; @@ -288,7 +288,7 @@ int snd_hdac_acomp_init(struct hdac_bus *bus, bus->audio_component = acomp; devres_add(dev, acomp); - component_match_add(dev, &match, match_master, bus); + component_match_add_typed(dev, &match, match_master, bus); ret = component_master_add_with_match(dev, &hdac_component_master_ops, match); if (ret < 0) diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c index 617ff1aa818f..7aee090e3d27 100644 --- a/sound/hda/hdac_i915.c +++ b/sound/hda/hdac_i915.c @@ -82,9 +82,11 @@ void snd_hdac_i915_set_bclk(struct hdac_bus *bus) } EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk); -static 
int i915_component_master_match(struct device *dev, void *data) +static int i915_component_master_match(struct device *dev, int subcomponent, + void *data) { - return !strcmp(dev->driver->name, "i915"); + return !strcmp(dev->driver->name, "i915") && + subcomponent == I915_COMPONENT_AUDIO; } /* check whether intel graphics is present */ -- cgit v1.2.3-71-gd317 From 32ea33a044842ae6c5fc7e33426e0a7bd50f8801 Mon Sep 17 00:00:00 2001 From: Tomas Winkler Date: Sat, 9 Feb 2019 18:42:05 +0200 Subject: mei: bus: export to_mei_cl_device for mei client device drivers Export the to_mei_cl_device macro, as it is also needed in the mei client drivers. Signed-off-by: Tomas Winkler Signed-off-by: Greg Kroah-Hartman --- drivers/misc/mei/bus.c | 1 - include/linux/mei_cl_bus.h | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c index fc3872fe7b25..e5456faf00e6 100644 --- a/drivers/misc/mei/bus.c +++ b/drivers/misc/mei/bus.c @@ -28,7 +28,6 @@ #include "client.h" #define to_mei_cl_driver(d) container_of(d, struct mei_cl_driver, driver) -#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) /** * __mei_cl_send - internal client send (write) diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 7fde40e17c8b..03b6ba2a63f8 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h @@ -55,6 +55,8 @@ struct mei_cl_device { void *priv_data; }; +#define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) + struct mei_cl_driver { struct device_driver driver; const char *name; -- cgit v1.2.3-71-gd317 From c68cfb718c8f97b7f7a50ed66be5feb42d0c8988 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 8 Feb 2019 17:11:25 +0000 Subject: misc: fastrpc: Add support for context Invoke method This patch adds support for the compute context invoke method on the remote processor (DSP). This involves setting up the function's input and output arguments, input and output handles, and mapping the dmabuf fd for the argument/handle buffers. The below diagram depicts invocation of a single method where the client and objects reside on different processors. An object could expose multiple methods which can be grouped together and referred to as an interface.

,--------,        ,------,  ,-----------,  ,------,        ,--------,
|        | method |      |  |           |  |      | method |        |
| Client |------->| Stub |->| Transport |->| Skel |------->| Object |
|        |        |      |  |           |  |      |        |        |
`--------`        `------`  `-----------`  `------`        `--------`

Client: Linux user mode process that initiates the remote invocation
Stub: Auto generated code linked in with the user mode process that takes care of marshaling parameters
Transport: Involved in carrying an invocation from a client to an object. This involves two portions: 1) FastRPC Linux kernel driver that receives the remote invocation, queues them up and then waits for the response after signaling the remote side. 2) Service running on the remote side that dequeues the messages from the queue and dispatches them for processing.
Skel: Auto generated code that takes care of un-marshaling parameters
Object: Method implementation

Most of the work is derived from various downstream Qualcomm kernels. Credits to various Qualcomm authors who have contributed to this code, especially Tharun Kumar Merugu.
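To make the scalars word concrete before the diff, a hedged worked example using the macros added below (the method index 4 is arbitrary; this fragment is illustration only, not driver code):

    /* A method with two input buffers, one output buffer, no handles: */
    u32 sc = FASTRPC_SCALARS(4, 2, 1);      /* == 0x04020100 */

    /* The driver recovers the marshaling layout from that same word: */
    REMOTE_SCALARS_INBUFS(sc);              /* 2 */
    REMOTE_SCALARS_OUTBUFS(sc);             /* 1 */
    REMOTE_SCALARS_LENGTH(sc);              /* 3 remote args to marshal */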
Specially Tharun Kumar Merugu Co-developed-by: Thierry Escande Signed-off-by: Thierry Escande Signed-off-by: Srinivas Kandagatla Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 730 ++++++++++++++++++++++++++++++++++++++++++++ include/uapi/misc/fastrpc.h | 23 ++ 2 files changed, 753 insertions(+) create mode 100644 include/uapi/misc/fastrpc.h (limited to 'include') diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 10b93fd5659a..cd69f8b308f6 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -2,7 +2,9 @@ // Copyright (c) 2011-2018, The Linux Foundation. All rights reserved. // Copyright (c) 2018, Linaro Limited +#include #include +#include #include #include #include @@ -14,6 +16,7 @@ #include #include #include +#include #define ADSP_DOMAIN_ID (0) #define MDSP_DOMAIN_ID (1) @@ -21,14 +24,118 @@ #define CDSP_DOMAIN_ID (3) #define FASTRPC_DEV_MAX 4 /* adsp, mdsp, slpi, cdsp*/ #define FASTRPC_MAX_SESSIONS 9 /*8 compute, 1 cpz*/ +#define FASTRPC_ALIGN 128 +#define FASTRPC_MAX_FDLIST 16 +#define FASTRPC_MAX_CRCLIST 64 +#define FASTRPC_PHYS(p) ((p) & 0xffffffff) #define FASTRPC_CTX_MAX (256) #define FASTRPC_CTXID_MASK (0xFF0) #define FASTRPC_DEVICE_NAME "fastrpc" +/* Retrives number of input buffers from the scalars parameter */ +#define REMOTE_SCALARS_INBUFS(sc) (((sc) >> 16) & 0x0ff) + +/* Retrives number of output buffers from the scalars parameter */ +#define REMOTE_SCALARS_OUTBUFS(sc) (((sc) >> 8) & 0x0ff) + +/* Retrives number of input handles from the scalars parameter */ +#define REMOTE_SCALARS_INHANDLES(sc) (((sc) >> 4) & 0x0f) + +/* Retrives number of output handles from the scalars parameter */ +#define REMOTE_SCALARS_OUTHANDLES(sc) ((sc) & 0x0f) + +#define REMOTE_SCALARS_LENGTH(sc) (REMOTE_SCALARS_INBUFS(sc) + \ + REMOTE_SCALARS_OUTBUFS(sc) + \ + REMOTE_SCALARS_INHANDLES(sc)+ \ + REMOTE_SCALARS_OUTHANDLES(sc)) +#define FASTRPC_BUILD_SCALARS(attr, method, in, out, oin, oout) \ + (((attr & 0x07) << 29) | \ + ((method & 0x1f) << 24) | \ + ((in & 0xff) << 16) | \ + ((out & 0xff) << 8) | \ + ((oin & 0x0f) << 4) | \ + (oout & 0x0f)) + +#define FASTRPC_SCALARS(method, in, out) \ + FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0) + #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev) static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", "sdsp", "cdsp"}; +struct fastrpc_phy_page { + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_buf { + u32 num; /* number of contiguous regions */ + u32 pgidx; /* index to start of contiguous region */ +}; + +struct fastrpc_remote_arg { + u64 pv; + u64 len; +}; + +struct fastrpc_msg { + int pid; /* process group id */ + int tid; /* thread id */ + u64 ctx; /* invoke caller context */ + u32 handle; /* handle to invoke */ + u32 sc; /* scalars structure describing the data */ + u64 addr; /* physical address */ + u64 size; /* size of contiguous region */ +}; + +struct fastrpc_invoke_rsp { + u64 ctx; /* invoke caller context */ + int retval; /* invoke return value */ +}; + +struct fastrpc_buf { + struct fastrpc_user *fl; + struct device *dev; + void *virt; + u64 phys; + u64 size; +}; + +struct fastrpc_map { + struct list_head node; + struct fastrpc_user *fl; + int fd; + struct dma_buf *buf; + struct sg_table *table; + struct dma_buf_attachment *attach; + u64 phys; + u64 size; + void *va; + u64 len; + struct kref refcount; +}; + +struct fastrpc_invoke_ctx { + int nscalars; + int nbufs; + int retval; + int pid; + int tgid; + u32 
sc; + u32 *crc; + u64 ctxid; + u64 msg_sz; + struct kref refcount; + struct list_head node; /* list of ctxs */ + struct completion work; + struct fastrpc_msg msg; + struct fastrpc_user *fl; + struct fastrpc_remote_arg *rpra; + struct fastrpc_map **maps; + struct fastrpc_buf *buf; + struct fastrpc_invoke_args *args; + struct fastrpc_channel_ctx *cctx; +}; struct fastrpc_session_ctx { struct device *dev; @@ -55,6 +162,7 @@ struct fastrpc_user { struct fastrpc_channel_ctx *cctx; struct fastrpc_session_ctx *sctx; + struct fastrpc_buf *init_mem; int tgid; int pd; @@ -64,6 +172,522 @@ struct fastrpc_user { struct mutex mutex; }; +static void fastrpc_free_map(struct kref *ref) +{ + struct fastrpc_map *map; + + map = container_of(ref, struct fastrpc_map, refcount); + + if (map->table) { + dma_buf_unmap_attachment(map->attach, map->table, + DMA_BIDIRECTIONAL); + dma_buf_detach(map->buf, map->attach); + dma_buf_put(map->buf); + } + + kfree(map); +} + +static void fastrpc_map_put(struct fastrpc_map *map) +{ + if (map) + kref_put(&map->refcount, fastrpc_free_map); +} + +static void fastrpc_map_get(struct fastrpc_map *map) +{ + if (map) + kref_get(&map->refcount); +} + +static int fastrpc_map_find(struct fastrpc_user *fl, int fd, + struct fastrpc_map **ppmap) +{ + struct fastrpc_map *map = NULL; + + mutex_lock(&fl->mutex); + list_for_each_entry(map, &fl->maps, node) { + if (map->fd == fd) { + fastrpc_map_get(map); + *ppmap = map; + mutex_unlock(&fl->mutex); + return 0; + } + } + mutex_unlock(&fl->mutex); + + return -ENOENT; +} + +static void fastrpc_buf_free(struct fastrpc_buf *buf) +{ + dma_free_coherent(buf->dev, buf->size, buf->virt, + FASTRPC_PHYS(buf->phys)); + kfree(buf); +} + +static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, + u64 size, struct fastrpc_buf **obuf) +{ + struct fastrpc_buf *buf; + + buf = kzalloc(sizeof(*buf), GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf->fl = fl; + buf->virt = NULL; + buf->phys = 0; + buf->size = size; + buf->dev = dev; + + buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, + GFP_KERNEL); + if (!buf->virt) + return -ENOMEM; + + if (fl->sctx && fl->sctx->sid) + buf->phys += ((u64)fl->sctx->sid << 32); + + *obuf = buf; + + return 0; +} + +static void fastrpc_context_free(struct kref *ref) +{ + struct fastrpc_invoke_ctx *ctx; + struct fastrpc_channel_ctx *cctx; + int i; + + ctx = container_of(ref, struct fastrpc_invoke_ctx, refcount); + cctx = ctx->cctx; + + for (i = 0; i < ctx->nscalars; i++) + fastrpc_map_put(ctx->maps[i]); + + if (ctx->buf) + fastrpc_buf_free(ctx->buf); + + spin_lock(&cctx->lock); + idr_remove(&cctx->ctx_idr, ctx->ctxid >> 4); + spin_unlock(&cctx->lock); + + kfree(ctx->maps); + kfree(ctx); +} + +static void fastrpc_context_get(struct fastrpc_invoke_ctx *ctx) +{ + kref_get(&ctx->refcount); +} + +static void fastrpc_context_put(struct fastrpc_invoke_ctx *ctx) +{ + kref_put(&ctx->refcount, fastrpc_context_free); +} + +static struct fastrpc_invoke_ctx *fastrpc_context_alloc( + struct fastrpc_user *user, u32 kernel, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_channel_ctx *cctx = user->cctx; + struct fastrpc_invoke_ctx *ctx = NULL; + int ret; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return ERR_PTR(-ENOMEM); + + INIT_LIST_HEAD(&ctx->node); + ctx->fl = user; + ctx->nscalars = REMOTE_SCALARS_LENGTH(sc); + ctx->nbufs = REMOTE_SCALARS_INBUFS(sc) + + REMOTE_SCALARS_OUTBUFS(sc); + + if (ctx->nscalars) { + ctx->maps = kcalloc(ctx->nscalars, + sizeof(*ctx->maps), 
GFP_KERNEL); + if (!ctx->maps) { + kfree(ctx); + return ERR_PTR(-ENOMEM); + } + ctx->args = args; + } + + ctx->sc = sc; + ctx->retval = -1; + ctx->pid = current->pid; + ctx->tgid = user->tgid; + ctx->cctx = cctx; + init_completion(&ctx->work); + + spin_lock(&user->lock); + list_add_tail(&ctx->node, &user->pending); + spin_unlock(&user->lock); + + spin_lock(&cctx->lock); + ret = idr_alloc_cyclic(&cctx->ctx_idr, ctx, 1, + FASTRPC_CTX_MAX, GFP_ATOMIC); + if (ret < 0) { + spin_unlock(&cctx->lock); + goto err_idr; + } + ctx->ctxid = ret << 4; + spin_unlock(&cctx->lock); + + kref_init(&ctx->refcount); + + return ctx; +err_idr: + spin_lock(&user->lock); + list_del(&ctx->node); + spin_unlock(&user->lock); + kfree(ctx->maps); + kfree(ctx); + + return ERR_PTR(ret); +} + +static int fastrpc_map_create(struct fastrpc_user *fl, int fd, + u64 len, struct fastrpc_map **ppmap) +{ + struct fastrpc_session_ctx *sess = fl->sctx; + struct fastrpc_map *map = NULL; + int err = 0; + + if (!fastrpc_map_find(fl, fd, ppmap)) + return 0; + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) + return -ENOMEM; + + INIT_LIST_HEAD(&map->node); + map->fl = fl; + map->fd = fd; + map->buf = dma_buf_get(fd); + if (!map->buf) { + err = -EINVAL; + goto get_err; + } + + map->attach = dma_buf_attach(map->buf, sess->dev); + if (IS_ERR(map->attach)) { + dev_err(sess->dev, "Failed to attach dmabuf\n"); + err = PTR_ERR(map->attach); + goto attach_err; + } + + map->table = dma_buf_map_attachment(map->attach, DMA_BIDIRECTIONAL); + if (IS_ERR(map->table)) { + err = PTR_ERR(map->table); + goto map_err; + } + + map->phys = sg_dma_address(map->table->sgl); + map->phys += ((u64)fl->sctx->sid << 32); + map->size = len; + map->va = sg_virt(map->table->sgl); + map->len = len; + kref_init(&map->refcount); + + spin_lock(&fl->lock); + list_add_tail(&map->node, &fl->maps); + spin_unlock(&fl->lock); + *ppmap = map; + + return 0; + +map_err: + dma_buf_detach(map->buf, map->attach); +attach_err: + dma_buf_put(map->buf); +get_err: + kfree(map); + + return err; +} + +/* + * Fastrpc payload buffer with metadata looks like: + * + * >>>>>> START of METADATA <<<<<<<<< + * +---------------------------------+ + * | Arguments | + * | type:(struct fastrpc_remote_arg)| + * | (0 - N) | + * +---------------------------------+ + * | Invoke Buffer list | + * | type:(struct fastrpc_invoke_buf)| + * | (0 - N) | + * +---------------------------------+ + * | Page info list | + * | type:(struct fastrpc_phy_page) | + * | (0 - N) | + * +---------------------------------+ + * | Optional info | + * |(can be specific to SoC/Firmware)| + * +---------------------------------+ + * >>>>>>>> END of METADATA <<<<<<<<< + * +---------------------------------+ + * | Inline ARGS | + * | (0-N) | + * +---------------------------------+ + */ + +static int fastrpc_get_meta_size(struct fastrpc_invoke_ctx *ctx) +{ + int size = 0; + + size = (sizeof(struct fastrpc_remote_arg) + + sizeof(struct fastrpc_invoke_buf) + + sizeof(struct fastrpc_phy_page)) * ctx->nscalars + + sizeof(u64) * FASTRPC_MAX_FDLIST + + sizeof(u32) * FASTRPC_MAX_CRCLIST; + + return size; +} + +static u64 fastrpc_get_payload_size(struct fastrpc_invoke_ctx *ctx, int metalen) +{ + u64 size = 0; + int i; + + size = ALIGN(metalen, FASTRPC_ALIGN); + for (i = 0; i < ctx->nscalars; i++) { + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1) { + size = ALIGN(size, FASTRPC_ALIGN); + size += ctx->args[i].length; + } + } + + return size; +} + +static int fastrpc_create_maps(struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev 
= ctx->fl->sctx->dev; + int i, err; + + for (i = 0; i < ctx->nscalars; ++i) { + /* Make sure reserved field is set to 0 */ + if (ctx->args[i].reserved) + return -EINVAL; + + if (ctx->args[i].fd == 0 || ctx->args[i].fd == -1 || + ctx->args[i].length == 0) + continue; + + err = fastrpc_map_create(ctx->fl, ctx->args[i].fd, + ctx->args[i].length, &ctx->maps[i]); + if (err) { + dev_err(dev, "Error Creating map %d\n", err); + return -EINVAL; + } + + } + return 0; +} + +static int fastrpc_get_args(u32 kernel, struct fastrpc_invoke_ctx *ctx) +{ + struct device *dev = ctx->fl->sctx->dev; + struct fastrpc_remote_arg *rpra; + struct fastrpc_invoke_buf *list; + struct fastrpc_phy_page *pages; + int inbufs, i, err = 0; + u64 rlen, pkt_size; + uintptr_t args; + int metalen; + + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + metalen = fastrpc_get_meta_size(ctx); + pkt_size = fastrpc_get_payload_size(ctx, metalen); + + err = fastrpc_create_maps(ctx); + if (err) + return err; + + ctx->msg_sz = pkt_size; + + err = fastrpc_buf_alloc(ctx->fl, dev, pkt_size, &ctx->buf); + if (err) + return err; + + rpra = ctx->buf->virt; + list = ctx->buf->virt + ctx->nscalars * sizeof(*rpra); + pages = ctx->buf->virt + ctx->nscalars * (sizeof(*list) + + sizeof(*rpra)); + args = (uintptr_t)ctx->buf->virt + metalen; + rlen = pkt_size - metalen; + ctx->rpra = rpra; + + for (i = 0; i < ctx->nbufs; ++i) { + u64 len = ctx->args[i].length; + + rpra[i].pv = 0; + rpra[i].len = len; + list[i].num = len ? 1 : 0; + list[i].pgidx = i; + + if (!len) + continue; + + pages[i].size = roundup(len, PAGE_SIZE); + + if (ctx->maps[i]) { + rpra[i].pv = (u64) ctx->args[i].ptr; + pages[i].addr = ctx->maps[i]->phys; + } else { + rlen -= ALIGN(args, FASTRPC_ALIGN) - args; + args = ALIGN(args, FASTRPC_ALIGN); + if (rlen < len) + goto bail; + + rpra[i].pv = args; + pages[i].addr = ctx->buf->phys + (pkt_size - rlen); + pages[i].addr = pages[i].addr & PAGE_MASK; + args = args + len; + rlen -= len; + } + + if (i < inbufs && !ctx->maps[i]) { + void *dst = (void *)(uintptr_t)rpra[i].pv; + void *src = (void *)(uintptr_t)ctx->args[i].ptr; + + if (!kernel) { + if (copy_from_user(dst, (void __user *)src, + len)) { + err = -EFAULT; + goto bail; + } + } else { + memcpy(dst, src, len); + } + } + } + + for (i = ctx->nbufs; i < ctx->nscalars; ++i) { + rpra[i].pv = (u64) ctx->args[i].ptr; + rpra[i].len = ctx->args[i].length; + list[i].num = ctx->args[i].length ? 
1 : 0; + list[i].pgidx = i; + pages[i].addr = ctx->maps[i]->phys; + pages[i].size = ctx->maps[i]->size; + } + +bail: + if (err) + dev_err(dev, "Error: get invoke args failed:%d\n", err); + + return err; +} + +static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx, + u32 kernel) +{ + struct fastrpc_remote_arg *rpra = ctx->rpra; + int i, inbufs; + + inbufs = REMOTE_SCALARS_INBUFS(ctx->sc); + + for (i = inbufs; i < ctx->nbufs; ++i) { + void *src = (void *)(uintptr_t)rpra[i].pv; + void *dst = (void *)(uintptr_t)ctx->args[i].ptr; + u64 len = rpra[i].len; + + if (!kernel) { + if (copy_to_user((void __user *)dst, src, len)) + return -EFAULT; + } else { + memcpy(dst, src, len); + } + } + + return 0; +} + +static int fastrpc_invoke_send(struct fastrpc_session_ctx *sctx, + struct fastrpc_invoke_ctx *ctx, + u32 kernel, uint32_t handle) +{ + struct fastrpc_channel_ctx *cctx; + struct fastrpc_user *fl = ctx->fl; + struct fastrpc_msg *msg = &ctx->msg; + + cctx = fl->cctx; + msg->pid = fl->tgid; + msg->tid = current->pid; + + if (kernel) + msg->pid = 0; + + msg->ctx = ctx->ctxid | fl->pd; + msg->handle = handle; + msg->sc = ctx->sc; + msg->addr = ctx->buf ? ctx->buf->phys : 0; + msg->size = roundup(ctx->msg_sz, PAGE_SIZE); + fastrpc_context_get(ctx); + + return rpmsg_send(cctx->rpdev->ept, (void *)msg, sizeof(*msg)); +} + +static int fastrpc_internal_invoke(struct fastrpc_user *fl, u32 kernel, + u32 handle, u32 sc, + struct fastrpc_invoke_args *args) +{ + struct fastrpc_invoke_ctx *ctx = NULL; + int err = 0; + + if (!fl->sctx) + return -EINVAL; + + ctx = fastrpc_context_alloc(fl, kernel, sc, args); + if (IS_ERR(ctx)) + return PTR_ERR(ctx); + + if (ctx->nscalars) { + err = fastrpc_get_args(kernel, ctx); + if (err) + goto bail; + } + /* Send invoke buffer to remote dsp */ + err = fastrpc_invoke_send(fl->sctx, ctx, kernel, handle); + if (err) + goto bail; + + /* Wait for remote dsp to respond or time out */ + err = wait_for_completion_interruptible(&ctx->work); + if (err) + goto bail; + + /* Check the response from remote dsp */ + err = ctx->retval; + if (err) + goto bail; + + if (ctx->nscalars) { + /* populate all the output buffers with results */ + err = fastrpc_put_args(ctx, kernel); + if (err) + goto bail; + } + +bail: + /* We are done with this compute context, remove it from pending list */ + spin_lock(&fl->lock); + list_del(&ctx->node); + spin_unlock(&fl->lock); + fastrpc_context_put(ctx); + + if (err) + dev_dbg(fl->sctx->dev, "Error: Invoke Failed %d\n", err); + + return err; +} + static struct fastrpc_session_ctx *fastrpc_session_alloc( struct fastrpc_channel_ctx *cctx) { @@ -95,11 +719,26 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) { struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; struct fastrpc_channel_ctx *cctx = fl->cctx; + struct fastrpc_invoke_ctx *ctx, *n; + struct fastrpc_map *map, *m; spin_lock(&cctx->lock); list_del(&fl->user); spin_unlock(&cctx->lock); + if (fl->init_mem) + fastrpc_buf_free(fl->init_mem); + + list_for_each_entry_safe(ctx, n, &fl->pending, node) { + list_del(&ctx->node); + fastrpc_context_put(ctx); + } + + list_for_each_entry_safe(map, m, &fl->maps, node) { + list_del(&map->node); + fastrpc_map_put(map); + } + fastrpc_session_free(cctx, fl->sctx); mutex_destroy(&fl->mutex); @@ -134,9 +773,60 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) return 0; } +static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_invoke_args *args = NULL; + struct fastrpc_invoke 
inv; + u32 nscalars; + int err; + + if (copy_from_user(&inv, argp, sizeof(inv))) + return -EFAULT; + + /* nscalars is truncated here to max supported value */ + nscalars = REMOTE_SCALARS_LENGTH(inv.sc); + if (nscalars) { + args = kcalloc(nscalars, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(args, (void __user *)(uintptr_t)inv.args, + nscalars * sizeof(*args))) { + kfree(args); + return -EFAULT; + } + } + + err = fastrpc_internal_invoke(fl, false, inv.handle, inv.sc, args); + kfree(args); + + return err; +} + +static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; + char __user *argp = (char __user *)arg; + int err; + + switch (cmd) { + case FASTRPC_IOCTL_INVOKE: + err = fastrpc_invoke(fl, argp); + break; + default: + err = -ENOTTY; + break; + } + + return err; +} + static const struct file_operations fastrpc_fops = { .open = fastrpc_device_open, .release = fastrpc_device_release, + .unlocked_ioctl = fastrpc_device_ioctl, + .compat_ioctl = fastrpc_device_ioctl, }; static int fastrpc_cb_probe(struct platform_device *pdev) @@ -260,9 +950,25 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev) return of_platform_populate(rdev->of_node, NULL, NULL, rdev); } +static void fastrpc_notify_users(struct fastrpc_user *user) +{ + struct fastrpc_invoke_ctx *ctx; + + spin_lock(&user->lock); + list_for_each_entry(ctx, &user->pending, node) + complete(&ctx->work); + spin_unlock(&user->lock); +} + static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) { struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_user *user; + + spin_lock(&cctx->lock); + list_for_each_entry(user, &cctx->users, user) + fastrpc_notify_users(user); + spin_unlock(&cctx->lock); misc_deregister(&cctx->miscdev); of_platform_depopulate(&rpdev->dev); @@ -272,6 +978,30 @@ static void fastrpc_rpmsg_remove(struct rpmsg_device *rpdev) static int fastrpc_rpmsg_callback(struct rpmsg_device *rpdev, void *data, int len, void *priv, u32 addr) { + struct fastrpc_channel_ctx *cctx = dev_get_drvdata(&rpdev->dev); + struct fastrpc_invoke_rsp *rsp = data; + struct fastrpc_invoke_ctx *ctx; + unsigned long flags; + unsigned long ctxid; + + if (len < sizeof(*rsp)) + return -EINVAL; + + ctxid = ((rsp->ctx & FASTRPC_CTXID_MASK) >> 4); + + spin_lock_irqsave(&cctx->lock, flags); + ctx = idr_find(&cctx->ctx_idr, ctxid); + spin_unlock_irqrestore(&cctx->lock, flags); + + if (!ctx) { + dev_err(&rpdev->dev, "No context ID matches response\n"); + return -ENOENT; + } + + ctx->retval = rsp->retval; + complete(&ctx->work); + fastrpc_context_put(ctx); + return 0; } diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h new file mode 100644 index 000000000000..a69ef33dc37e --- /dev/null +++ b/include/uapi/misc/fastrpc.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef __QCOM_FASTRPC_H__ +#define __QCOM_FASTRPC_H__ + +#include + +#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) + +struct fastrpc_invoke_args { + __u64 ptr; + __u64 length; + __s32 fd; + __u32 reserved; +}; + +struct fastrpc_invoke { + __u32 handle; + __u32 sc; + __u64 args; +}; + +#endif /* __QCOM_FASTRPC_H__ */ -- cgit v1.2.3-71-gd317 From d73f71c7c6ee1583c08c214c8f7b20d841490b36 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 8 Feb 2019 17:11:26 +0000 Subject: misc: fastrpc: Add support for create remote init process This patch adds support to create or 
attach remote shell process. The shell process called fastrpc_shell_0 is usually loaded on the DSP when a user process is spawned. Most of the work is derived from various downstream Qualcomm kernels. Credits to various Qualcomm authors who have contributed to this code. Specially Tharun Kumar Merugu Co-developed-by: Thierry Escande Signed-off-by: Thierry Escande Signed-off-by: Srinivas Kandagatla Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 156 ++++++++++++++++++++++++++++++++++++++++++++ include/uapi/misc/fastrpc.h | 10 +++ 2 files changed, 166 insertions(+) (limited to 'include') diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index cd69f8b308f6..ceb498487569 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -29,7 +29,10 @@ #define FASTRPC_MAX_CRCLIST 64 #define FASTRPC_PHYS(p) ((p) & 0xffffffff) #define FASTRPC_CTX_MAX (256) +#define FASTRPC_INIT_HANDLE 1 #define FASTRPC_CTXID_MASK (0xFF0) +#define INIT_FILELEN_MAX (2 * 1024 * 1024) +#define INIT_MEMLEN_MAX (8 * 1024 * 1024) #define FASTRPC_DEVICE_NAME "fastrpc" /* Retrives number of input buffers from the scalars parameter */ @@ -59,6 +62,14 @@ #define FASTRPC_SCALARS(method, in, out) \ FASTRPC_BUILD_SCALARS(0, method, in, out, 0, 0) +#define FASTRPC_CREATE_PROCESS_NARGS 6 +/* Remote Method id table */ +#define FASTRPC_RMID_INIT_ATTACH 0 +#define FASTRPC_RMID_INIT_RELEASE 1 +#define FASTRPC_RMID_INIT_CREATE 6 +#define FASTRPC_RMID_INIT_CREATE_ATTR 7 +#define FASTRPC_RMID_INIT_CREATE_STATIC 8 + #define miscdev_to_cctx(d) container_of(d, struct fastrpc_channel_ctx, miscdev) static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp", @@ -688,6 +699,109 @@ bail: return err; } +static int fastrpc_init_create_process(struct fastrpc_user *fl, + char __user *argp) +{ + struct fastrpc_init_create init; + struct fastrpc_invoke_args *args; + struct fastrpc_phy_page pages[1]; + struct fastrpc_map *map = NULL; + struct fastrpc_buf *imem = NULL; + int memlen; + int err; + struct { + int pgid; + u32 namelen; + u32 filelen; + u32 pageslen; + u32 attrs; + u32 siglen; + } inbuf; + u32 sc; + + args = kcalloc(FASTRPC_CREATE_PROCESS_NARGS, sizeof(*args), GFP_KERNEL); + if (!args) + return -ENOMEM; + + if (copy_from_user(&init, argp, sizeof(init))) { + err = -EFAULT; + goto bail; + } + + if (init.filelen > INIT_FILELEN_MAX) { + err = -EINVAL; + goto bail; + } + + inbuf.pgid = fl->tgid; + inbuf.namelen = strlen(current->comm) + 1; + inbuf.filelen = init.filelen; + inbuf.pageslen = 1; + inbuf.attrs = init.attrs; + inbuf.siglen = init.siglen; + fl->pd = 1; + + if (init.filelen && init.filefd) { + err = fastrpc_map_create(fl, init.filefd, init.filelen, &map); + if (err) + goto bail; + } + + memlen = ALIGN(max(INIT_FILELEN_MAX, (int)init.filelen * 4), + 1024 * 1024); + err = fastrpc_buf_alloc(fl, fl->sctx->dev, memlen, + &imem); + if (err) { + fastrpc_map_put(map); + goto bail; + } + + fl->init_mem = imem; + args[0].ptr = (u64)(uintptr_t)&inbuf; + args[0].length = sizeof(inbuf); + args[0].fd = -1; + + args[1].ptr = (u64)(uintptr_t)current->comm; + args[1].length = inbuf.namelen; + args[1].fd = -1; + + args[2].ptr = (u64) init.file; + args[2].length = inbuf.filelen; + args[2].fd = init.filefd; + + pages[0].addr = imem->phys; + pages[0].size = imem->size; + + args[3].ptr = (u64)(uintptr_t) pages; + args[3].length = 1 * sizeof(*pages); + args[3].fd = -1; + + args[4].ptr = (u64)(uintptr_t)&inbuf.attrs; + args[4].length = sizeof(inbuf.attrs); + args[4].fd = -1; + + args[5].ptr = (u64)(uintptr_t) &inbuf.siglen; + 
args[5].length = sizeof(inbuf.siglen); + args[5].fd = -1; + + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE, 4, 0); + if (init.attrs) + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_CREATE_ATTR, 6, 0); + + err = fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, args); + + if (err) { + fastrpc_map_put(map); + fastrpc_buf_free(imem); + } + +bail: + kfree(args); + + return err; +} + static struct fastrpc_session_ctx *fastrpc_session_alloc( struct fastrpc_channel_ctx *cctx) { @@ -715,6 +829,23 @@ static void fastrpc_session_free(struct fastrpc_channel_ctx *cctx, spin_unlock(&cctx->lock); } +static int fastrpc_release_current_dsp_process(struct fastrpc_user *fl) +{ + struct fastrpc_invoke_args args[1]; + int tgid = 0; + u32 sc; + + tgid = fl->tgid; + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_RELEASE, 1, 0); + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + static int fastrpc_device_release(struct inode *inode, struct file *file) { struct fastrpc_user *fl = (struct fastrpc_user *)file->private_data; @@ -722,6 +853,8 @@ static int fastrpc_device_release(struct inode *inode, struct file *file) struct fastrpc_invoke_ctx *ctx, *n; struct fastrpc_map *map, *m; + fastrpc_release_current_dsp_process(fl); + spin_lock(&cctx->lock); list_del(&fl->user); spin_unlock(&cctx->lock); @@ -773,6 +906,23 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) return 0; } +static int fastrpc_init_attach(struct fastrpc_user *fl) +{ + struct fastrpc_invoke_args args[1]; + int tgid = fl->tgid; + u32 sc; + + args[0].ptr = (u64)(uintptr_t) &tgid; + args[0].length = sizeof(tgid); + args[0].fd = -1; + args[0].reserved = 0; + sc = FASTRPC_SCALARS(FASTRPC_RMID_INIT_ATTACH, 1, 0); + fl->pd = 0; + + return fastrpc_internal_invoke(fl, true, FASTRPC_INIT_HANDLE, + sc, &args[0]); +} + static int fastrpc_invoke(struct fastrpc_user *fl, char __user *argp) { struct fastrpc_invoke_args *args = NULL; @@ -814,6 +964,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, case FASTRPC_IOCTL_INVOKE: err = fastrpc_invoke(fl, argp); break; + case FASTRPC_IOCTL_INIT_ATTACH: + err = fastrpc_init_attach(fl); + break; + case FASTRPC_IOCTL_INIT_CREATE: + err = fastrpc_init_create_process(fl, argp); + break; default: err = -ENOTTY; break; diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index a69ef33dc37e..32d191c3b7bc 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -6,6 +6,8 @@ #include #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) +#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) +#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) struct fastrpc_invoke_args { __u64 ptr; @@ -20,4 +22,12 @@ struct fastrpc_invoke { __u64 args; }; +struct fastrpc_init_create { + __u32 filelen; /* elf file length */ + __s32 filefd; /* fd for the file */ + __u32 attrs; + __u32 siglen; + __u64 file; /* pointer to elf file */ +}; + #endif /* __QCOM_FASTRPC_H__ */ -- cgit v1.2.3-71-gd317 From 6cffd79504ce040f460831030d3069fa1c99bb71 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Fri, 8 Feb 2019 17:11:27 +0000 Subject: misc: fastrpc: Add support for dmabuf exporter User process can involve dealing with big buffer sizes, and also passing buffers from one compute context bank to other compute context bank for complex dsp algorithms. 
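For instance, with such an exporter in place, a client could allocate a DSP-shareable buffer and receive a dmabuf fd for it roughly as follows. This is a hypothetical user-space sketch against the ioctls added below: the flags value is arbitrary and error handling is reduced to a bare failure return.

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <misc/fastrpc.h>

    /* Allocate 'size' bytes through the fastrpc device and return the
     * exported dmabuf fd, which can then be mmap()ed or passed to another
     * device instead of copying the data between context banks. */
    static int alloc_dsp_buf(int dev_fd, uint64_t size)
    {
            struct fastrpc_alloc_dma_buf bp = {
                    .fd = -1,
                    .flags = 0,
                    .size = size,
            };

            if (ioctl(dev_fd, FASTRPC_IOCTL_ALLOC_DMA_BUFF, &bp) < 0)
                    return -1;

            return bp.fd;
    }
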
This patch adds support to fastrpc to make it a proper dmabuf exporter to avoid making copies of buffers. Co-developed-by: Thierry Escande Signed-off-by: Thierry Escande Signed-off-by: Srinivas Kandagatla Signed-off-by: Greg Kroah-Hartman --- drivers/misc/fastrpc.c | 184 ++++++++++++++++++++++++++++++++++++++++++++ include/uapi/misc/fastrpc.h | 8 ++ 2 files changed, 192 insertions(+) (limited to 'include') diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index ceb498487569..4b0db33896df 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -106,10 +106,20 @@ struct fastrpc_invoke_rsp { struct fastrpc_buf { struct fastrpc_user *fl; + struct dma_buf *dmabuf; struct device *dev; void *virt; u64 phys; u64 size; + /* Lock for dma buf attachments */ + struct mutex lock; + struct list_head attachments; +}; + +struct fastrpc_dma_buf_attachment { + struct device *dev; + struct sg_table sgt; + struct list_head node; }; struct fastrpc_map { @@ -246,6 +256,9 @@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, if (!buf) return -ENOMEM; + INIT_LIST_HEAD(&buf->attachments); + mutex_init(&buf->lock); + buf->fl = fl; buf->virt = NULL; buf->phys = 0; @@ -360,6 +373,111 @@ err_idr: return ERR_PTR(ret); } +static struct sg_table * +fastrpc_map_dma_buf(struct dma_buf_attachment *attachment, + enum dma_data_direction dir) +{ + struct fastrpc_dma_buf_attachment *a = attachment->priv; + struct sg_table *table; + + table = &a->sgt; + + if (!dma_map_sg(attachment->dev, table->sgl, table->nents, dir)) + return ERR_PTR(-ENOMEM); + + return table; +} + +static void fastrpc_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *table, + enum dma_data_direction dir) +{ + dma_unmap_sg(attach->dev, table->sgl, table->nents, dir); +} + +static void fastrpc_release(struct dma_buf *dmabuf) +{ + struct fastrpc_buf *buffer = dmabuf->priv; + + fastrpc_buf_free(buffer); +} + +static int fastrpc_dma_buf_attach(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct fastrpc_dma_buf_attachment *a; + struct fastrpc_buf *buffer = dmabuf->priv; + int ret; + + a = kzalloc(sizeof(*a), GFP_KERNEL); + if (!a) + return -ENOMEM; + + ret = dma_get_sgtable(buffer->dev, &a->sgt, buffer->virt, + FASTRPC_PHYS(buffer->phys), buffer->size); + if (ret < 0) { + dev_err(buffer->dev, "failed to get scatterlist from DMA API\n"); + return -EINVAL; + } + + a->dev = attachment->dev; + INIT_LIST_HEAD(&a->node); + attachment->priv = a; + + mutex_lock(&buffer->lock); + list_add(&a->node, &buffer->attachments); + mutex_unlock(&buffer->lock); + + return 0; +} + +static void fastrpc_dma_buf_detatch(struct dma_buf *dmabuf, + struct dma_buf_attachment *attachment) +{ + struct fastrpc_dma_buf_attachment *a = attachment->priv; + struct fastrpc_buf *buffer = dmabuf->priv; + + mutex_lock(&buffer->lock); + list_del(&a->node); + mutex_unlock(&buffer->lock); + kfree(a); +} + +static void *fastrpc_kmap(struct dma_buf *dmabuf, unsigned long pgnum) +{ + struct fastrpc_buf *buf = dmabuf->priv; + + return buf->virt ? 
buf->virt + pgnum * PAGE_SIZE : NULL; +} + +static void *fastrpc_vmap(struct dma_buf *dmabuf) +{ + struct fastrpc_buf *buf = dmabuf->priv; + + return buf->virt; +} + +static int fastrpc_mmap(struct dma_buf *dmabuf, + struct vm_area_struct *vma) +{ + struct fastrpc_buf *buf = dmabuf->priv; + size_t size = vma->vm_end - vma->vm_start; + + return dma_mmap_coherent(buf->dev, vma, buf->virt, + FASTRPC_PHYS(buf->phys), size); +} + +static const struct dma_buf_ops fastrpc_dma_buf_ops = { + .attach = fastrpc_dma_buf_attach, + .detach = fastrpc_dma_buf_detatch, + .map_dma_buf = fastrpc_map_dma_buf, + .unmap_dma_buf = fastrpc_unmap_dma_buf, + .mmap = fastrpc_mmap, + .map = fastrpc_kmap, + .vmap = fastrpc_vmap, + .release = fastrpc_release, +}; + static int fastrpc_map_create(struct fastrpc_user *fl, int fd, u64 len, struct fastrpc_map **ppmap) { @@ -906,6 +1024,66 @@ static int fastrpc_device_open(struct inode *inode, struct file *filp) return 0; } +static int fastrpc_dmabuf_free(struct fastrpc_user *fl, char __user *argp) +{ + struct dma_buf *buf; + int info; + + if (copy_from_user(&info, argp, sizeof(info))) + return -EFAULT; + + buf = dma_buf_get(info); + if (IS_ERR_OR_NULL(buf)) + return -EINVAL; + /* + * one for the last get and other for the ALLOC_DMA_BUFF ioctl + */ + dma_buf_put(buf); + dma_buf_put(buf); + + return 0; +} + +static int fastrpc_dmabuf_alloc(struct fastrpc_user *fl, char __user *argp) +{ + struct fastrpc_alloc_dma_buf bp; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + struct fastrpc_buf *buf = NULL; + int err; + + if (copy_from_user(&bp, argp, sizeof(bp))) + return -EFAULT; + + err = fastrpc_buf_alloc(fl, fl->sctx->dev, bp.size, &buf); + if (err) + return err; + exp_info.ops = &fastrpc_dma_buf_ops; + exp_info.size = bp.size; + exp_info.flags = O_RDWR; + exp_info.priv = buf; + buf->dmabuf = dma_buf_export(&exp_info); + if (IS_ERR(buf->dmabuf)) { + err = PTR_ERR(buf->dmabuf); + fastrpc_buf_free(buf); + return err; + } + + bp.fd = dma_buf_fd(buf->dmabuf, O_ACCMODE); + if (bp.fd < 0) { + dma_buf_put(buf->dmabuf); + return -EINVAL; + } + + if (copy_to_user(argp, &bp, sizeof(bp))) { + dma_buf_put(buf->dmabuf); + return -EFAULT; + } + + get_dma_buf(buf->dmabuf); + + return 0; +} + static int fastrpc_init_attach(struct fastrpc_user *fl) { struct fastrpc_invoke_args args[1]; @@ -970,6 +1148,12 @@ static long fastrpc_device_ioctl(struct file *file, unsigned int cmd, case FASTRPC_IOCTL_INIT_CREATE: err = fastrpc_init_create_process(fl, argp); break; + case FASTRPC_IOCTL_FREE_DMA_BUFF: + err = fastrpc_dmabuf_free(fl, argp); + break; + case FASTRPC_IOCTL_ALLOC_DMA_BUFF: + err = fastrpc_dmabuf_alloc(fl, argp); + break; default: err = -ENOTTY; break; diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h index 32d191c3b7bc..6d701af9fc42 100644 --- a/include/uapi/misc/fastrpc.h +++ b/include/uapi/misc/fastrpc.h @@ -5,6 +5,8 @@ #include +#define FASTRPC_IOCTL_ALLOC_DMA_BUFF _IOWR('R', 1, struct fastrpc_alloc_dma_buf) +#define FASTRPC_IOCTL_FREE_DMA_BUFF _IOWR('R', 2, __u32) #define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke) #define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4) #define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create) @@ -30,4 +32,10 @@ struct fastrpc_init_create { __u64 file; /* pointer to elf file */ }; +struct fastrpc_alloc_dma_buf { + __s32 fd; /* fd */ + __u32 flags; /* flags to map with */ + __u64 size; /* size */ +}; + #endif /* __QCOM_FASTRPC_H__ */ -- cgit v1.2.3-71-gd317 From 1aec4211204d9463d1fd209eb50453de16254599 Mon Sep 17 
00:00:00 2001 From: Sudip Mukherjee Date: Wed, 13 Feb 2019 08:47:06 +0000 Subject: parport: daisy: use new parport device model Modify parport daisy driver to use the new parallel port device model. Signed-off-by: Sudip Mukherjee Signed-off-by: Greg Kroah-Hartman --- drivers/parport/daisy.c | 32 +++++++++++++++++++++++++++++++- drivers/parport/probe.c | 2 +- drivers/parport/share.c | 10 +++++++++- include/linux/parport.h | 13 +++++++++++++ 4 files changed, 54 insertions(+), 3 deletions(-) (limited to 'include') diff --git a/drivers/parport/daisy.c b/drivers/parport/daisy.c index 5484a46dafda..56dd83a45e55 100644 --- a/drivers/parport/daisy.c +++ b/drivers/parport/daisy.c @@ -213,10 +213,12 @@ void parport_daisy_fini(struct parport *port) struct pardevice *parport_open(int devnum, const char *name) { struct daisydev *p = topology; + struct pardev_cb par_cb; struct parport *port; struct pardevice *dev; int daisy; + memset(&par_cb, 0, sizeof(par_cb)); spin_lock(&topology_lock); while (p && p->devnum != devnum) p = p->next; @@ -230,7 +232,7 @@ struct pardevice *parport_open(int devnum, const char *name) port = parport_get_port(p->port); spin_unlock(&topology_lock); - dev = parport_register_device(port, name, NULL, NULL, NULL, 0, NULL); + dev = parport_register_dev_model(port, name, &par_cb, devnum); parport_put_port(port); if (!dev) return NULL; @@ -480,3 +482,31 @@ static int assign_addrs(struct parport *port) kfree(deviceid); return detected; } + +static int daisy_drv_probe(struct pardevice *par_dev) +{ + struct device_driver *drv = par_dev->dev.driver; + + if (strcmp(drv->name, "daisy_drv")) + return -ENODEV; + if (strcmp(par_dev->name, daisy_dev_name)) + return -ENODEV; + + return 0; +} + +static struct parport_driver daisy_driver = { + .name = "daisy_drv", + .probe = daisy_drv_probe, + .devmodel = true, +}; + +int daisy_drv_init(void) +{ + return parport_register_driver(&daisy_driver); +} + +void daisy_drv_exit(void) +{ + parport_unregister_driver(&daisy_driver); +} diff --git a/drivers/parport/probe.c b/drivers/parport/probe.c index e035174ba205..e5e6a463a941 100644 --- a/drivers/parport/probe.c +++ b/drivers/parport/probe.c @@ -257,7 +257,7 @@ static ssize_t parport_read_device_id (struct parport *port, char *buffer, ssize_t parport_device_id (int devnum, char *buffer, size_t count) { ssize_t retval = -ENXIO; - struct pardevice *dev = parport_open (devnum, "Device ID probe"); + struct pardevice *dev = parport_open(devnum, daisy_dev_name); if (!dev) return -ENXIO; diff --git a/drivers/parport/share.c b/drivers/parport/share.c index 5dc53d420ca8..0171b8dbcdcd 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -137,11 +137,19 @@ static struct bus_type parport_bus_type = { int parport_bus_init(void) { - return bus_register(&parport_bus_type); + int retval; + + retval = bus_register(&parport_bus_type); + if (retval) + return retval; + daisy_drv_init(); + + return 0; } void parport_bus_exit(void) { + daisy_drv_exit(); bus_unregister(&parport_bus_type); } diff --git a/include/linux/parport.h b/include/linux/parport.h index 397607a0c0eb..f41f1d041e2c 100644 --- a/include/linux/parport.h +++ b/include/linux/parport.h @@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *, void *, size_t, int); /* IEEE1284.3 functions */ +#define daisy_dev_name "Device ID probe" extern int parport_daisy_init (struct parport *port); extern void parport_daisy_fini (struct parport *port); extern struct pardevice *parport_open (int devnum, const char *name); @@ -468,6 
+469,18 @@ extern ssize_t parport_device_id (int devnum, char *buffer, size_t len); extern void parport_daisy_deselect_all (struct parport *port); extern int parport_daisy_select (struct parport *port, int daisy, int mode); +#ifdef CONFIG_PARPORT_1284 +extern int daisy_drv_init(void); +extern void daisy_drv_exit(void); +#else +static inline int daisy_drv_init(void) +{ + return 0; +} + +static inline void daisy_drv_exit(void) {} +#endif + /* Lowlevel drivers _can_ call this support function to handle irqs. */ static inline void parport_generic_irq(struct parport *port) { -- cgit v1.2.3-71-gd317 From 593db80390cf40f1b9dcc790020d2edae87183fb Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 10 Jan 2019 16:25:32 +0200 Subject: vmbus: Switch to use new generic UUID API There are new types and helpers that are supposed to be used in new code. As a preparation to get rid of legacy types and API functions do the conversion here. Cc: "K. Y. Srinivasan" Cc: Haiyang Zhang Cc: Stephen Hemminger Cc: devel@linuxdriverproject.org Signed-off-by: Andy Shevchenko Reviewed-by: Michael Kelley Reviewed-by: Christoph Hellwig Signed-off-by: Sasha Levin --- drivers/hv/channel.c | 4 +- drivers/hv/channel_mgmt.c | 18 ++++----- drivers/hv/hyperv_vmbus.h | 4 +- drivers/hv/vmbus_drv.c | 48 ++++++++--------------- include/linux/hyperv.h | 98 +++++++++++++++++++++++------------------------ 5 files changed, 79 insertions(+), 93 deletions(-) (limited to 'include') diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index bea4c9850247..23381c41d087 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -282,8 +282,8 @@ int vmbus_open(struct vmbus_channel *newchannel, EXPORT_SYMBOL_GPL(vmbus_open); /* Used for Hyper-V Socket: a guest client's connect() to the host */ -int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, - const uuid_le *shv_host_servie_id) +int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, + const guid_t *shv_host_servie_id) { struct vmbus_channel_tl_connect_request conn_msg; int ret; diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index d01689079e9b..62703b354d6d 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -141,7 +141,7 @@ static const struct vmbus_device vmbus_devs[] = { }; static const struct { - uuid_le guid; + guid_t guid; } vmbus_unsupported_devs[] = { { HV_AVMA1_GUID }, { HV_AVMA2_GUID }, @@ -171,26 +171,26 @@ static void vmbus_rescind_cleanup(struct vmbus_channel *channel) spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock, flags); } -static bool is_unsupported_vmbus_devs(const uuid_le *guid) +static bool is_unsupported_vmbus_devs(const guid_t *guid) { int i; for (i = 0; i < ARRAY_SIZE(vmbus_unsupported_devs); i++) - if (!uuid_le_cmp(*guid, vmbus_unsupported_devs[i].guid)) + if (guid_equal(guid, &vmbus_unsupported_devs[i].guid)) return true; return false; } static u16 hv_get_dev_type(const struct vmbus_channel *channel) { - const uuid_le *guid = &channel->offermsg.offer.if_type; + const guid_t *guid = &channel->offermsg.offer.if_type; u16 i; if (is_hvsock_channel(channel) || is_unsupported_vmbus_devs(guid)) return HV_UNKNOWN; for (i = HV_IDE; i < HV_UNKNOWN; i++) { - if (!uuid_le_cmp(*guid, vmbus_devs[i].guid)) + if (guid_equal(guid, &vmbus_devs[i].guid)) return i; } pr_info("Unknown GUID: %pUl\n", guid); @@ -561,10 +561,10 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) atomic_dec(&vmbus_connection.offer_in_progress); list_for_each_entry(channel, 
&vmbus_connection.chn_list, listentry) { - if (!uuid_le_cmp(channel->offermsg.offer.if_type, - newchannel->offermsg.offer.if_type) && - !uuid_le_cmp(channel->offermsg.offer.if_instance, - newchannel->offermsg.offer.if_instance)) { + if (guid_equal(&channel->offermsg.offer.if_type, + &newchannel->offermsg.offer.if_type) && + guid_equal(&channel->offermsg.offer.if_instance, + &newchannel->offermsg.offer.if_instance)) { fnew = false; break; } diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h index a1f6ce6e5974..cb86b133eb4d 100644 --- a/drivers/hv/hyperv_vmbus.h +++ b/drivers/hv/hyperv_vmbus.h @@ -312,8 +312,8 @@ extern const struct vmbus_channel_message_table_entry /* General vmbus interface */ -struct hv_device *vmbus_device_create(const uuid_le *type, - const uuid_le *instance, +struct hv_device *vmbus_device_create(const guid_t *type, + const guid_t *instance, struct vmbus_channel *channel); int vmbus_device_register(struct hv_device *child_device_obj); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 403fee01572c..126c2de39e35 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -654,38 +654,28 @@ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) return ret; } -static const uuid_le null_guid; - -static inline bool is_null_guid(const uuid_le *guid) -{ - if (uuid_le_cmp(*guid, null_guid)) - return false; - return true; -} - static const struct hv_vmbus_device_id * -hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const uuid_le *guid) - +hv_vmbus_dev_match(const struct hv_vmbus_device_id *id, const guid_t *guid) { if (id == NULL) return NULL; /* empty device table */ - for (; !is_null_guid(&id->guid); id++) - if (!uuid_le_cmp(id->guid, *guid)) + for (; !guid_is_null(&id->guid); id++) + if (guid_equal(&id->guid, guid)) return id; return NULL; } static const struct hv_vmbus_device_id * -hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid) +hv_vmbus_dynid_match(struct hv_driver *drv, const guid_t *guid) { const struct hv_vmbus_device_id *id = NULL; struct vmbus_dynid *dynid; spin_lock(&drv->dynids.lock); list_for_each_entry(dynid, &drv->dynids.list, node) { - if (!uuid_le_cmp(dynid->id.guid, *guid)) { + if (guid_equal(&dynid->id.guid, guid)) { id = &dynid->id; break; } @@ -695,9 +685,7 @@ hv_vmbus_dynid_match(struct hv_driver *drv, const uuid_le *guid) return id; } -static const struct hv_vmbus_device_id vmbus_device_null = { - .guid = NULL_UUID_LE, -}; +static const struct hv_vmbus_device_id vmbus_device_null; /* * Return a matching hv_vmbus_device_id pointer. 
@@ -706,7 +694,7 @@ static const struct hv_vmbus_device_id vmbus_device_null = { static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv, struct hv_device *dev) { - const uuid_le *guid = &dev->dev_type; + const guid_t *guid = &dev->dev_type; const struct hv_vmbus_device_id *id; /* When driver_override is set, only bind to the matching driver */ @@ -726,7 +714,7 @@ static const struct hv_vmbus_device_id *hv_vmbus_get_id(struct hv_driver *drv, } /* vmbus_add_dynid - add a new device ID to this driver and re-probe devices */ -static int vmbus_add_dynid(struct hv_driver *drv, uuid_le *guid) +static int vmbus_add_dynid(struct hv_driver *drv, guid_t *guid) { struct vmbus_dynid *dynid; @@ -764,10 +752,10 @@ static ssize_t new_id_store(struct device_driver *driver, const char *buf, size_t count) { struct hv_driver *drv = drv_to_hv_drv(driver); - uuid_le guid; + guid_t guid; ssize_t retval; - retval = uuid_le_to_bin(buf, &guid); + retval = guid_parse(buf, &guid); if (retval) return retval; @@ -791,10 +779,10 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, { struct hv_driver *drv = drv_to_hv_drv(driver); struct vmbus_dynid *dynid, *n; - uuid_le guid; + guid_t guid; ssize_t retval; - retval = uuid_le_to_bin(buf, &guid); + retval = guid_parse(buf, &guid); if (retval) return retval; @@ -803,7 +791,7 @@ static ssize_t remove_id_store(struct device_driver *driver, const char *buf, list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) { struct hv_vmbus_device_id *id = &dynid->id; - if (!uuid_le_cmp(id->guid, guid)) { + if (guid_equal(&id->guid, &guid)) { list_del(&dynid->node); kfree(dynid); retval = count; @@ -1556,8 +1544,8 @@ int vmbus_add_channel_kobj(struct hv_device *dev, struct vmbus_channel *channel) * vmbus_device_create - Creates and registers a new child device * on the vmbus. */ -struct hv_device *vmbus_device_create(const uuid_le *type, - const uuid_le *instance, +struct hv_device *vmbus_device_create(const guid_t *type, + const guid_t *instance, struct vmbus_channel *channel) { struct hv_device *child_device_obj; @@ -1569,12 +1557,10 @@ struct hv_device *vmbus_device_create(const uuid_le *type, } child_device_obj->channel = channel; - memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); - memcpy(&child_device_obj->dev_instance, instance, - sizeof(uuid_le)); + guid_copy(&child_device_obj->dev_type, type); + guid_copy(&child_device_obj->dev_instance, instance); child_device_obj->vendor_id = 0x1414; /* MSFT vendor ID */ - return child_device_obj; } diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index dcb6977afce9..d5678a0fe598 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -222,8 +222,8 @@ static inline u32 hv_get_avail_to_write_percent( * struct contains the fundamental information about an offer. */ struct vmbus_channel_offer { - uuid_le if_type; - uuid_le if_instance; + guid_t if_type; + guid_t if_instance; /* * These two fields are not currently used. 
@@ -614,8 +614,8 @@ struct vmbus_channel_initiate_contact { /* Hyper-V socket: guest's connect()-ing to host */ struct vmbus_channel_tl_connect_request { struct vmbus_channel_message_header header; - uuid_le guest_endpoint_id; - uuid_le host_service_id; + guid_t guest_endpoint_id; + guid_t host_service_id; } __packed; struct vmbus_channel_version_response { @@ -714,7 +714,7 @@ enum vmbus_device_type { struct vmbus_device { u16 dev_type; - uuid_le guid; + guid_t guid; bool perf_device; }; @@ -1096,7 +1096,7 @@ struct hv_driver { bool hvsock; /* the device type supported by this driver */ - uuid_le dev_type; + guid_t dev_type; const struct hv_vmbus_device_id *id_table; struct device_driver driver; @@ -1116,10 +1116,10 @@ struct hv_driver { /* Base device object */ struct hv_device { /* the device type id of this device */ - uuid_le dev_type; + guid_t dev_type; /* the device instance id of this device */ - uuid_le dev_instance; + guid_t dev_instance; u16 vendor_id; u16 device_id; @@ -1188,102 +1188,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); * {f8615163-df3e-46c5-913f-f2d2f965ed0e} */ #define HV_NIC_GUID \ - .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ - 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) + .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ + 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) /* * IDE GUID * {32412632-86cb-44a2-9b5c-50d1417354f5} */ #define HV_IDE_GUID \ - .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ - 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) + .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ + 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) /* * SCSI GUID * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */ #define HV_SCSI_GUID \ - .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ - 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) + .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ + 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) /* * Shutdown GUID * {0e0b6031-5213-4934-818b-38d90ced39db} */ #define HV_SHUTDOWN_GUID \ - .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ - 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) + .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ + 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) /* * Time Synch GUID * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} */ #define HV_TS_GUID \ - .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ - 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) + .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ + 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) /* * Heartbeat GUID * {57164f39-9115-4e78-ab55-382f3bd5422d} */ #define HV_HEART_BEAT_GUID \ - .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ - 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) + .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ + 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) /* * KVP GUID * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} */ #define HV_KVP_GUID \ - .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ - 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) + .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ + 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) /* * Dynamic memory GUID * {525074dc-8985-46e2-8057-a307dc18a502} */ #define HV_DM_GUID \ - .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ - 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) + .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ + 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) /* * Mouse GUID * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} */ #define HV_MOUSE_GUID \ - .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ - 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) + .guid = GUID_INIT(0xcfa8b69e, 
0x5b4a, 0x4cc0, 0xb9, 0x8b, \ + 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) /* * Keyboard GUID * {f912ad6d-2b17-48ea-bd65-f927a61c7684} */ #define HV_KBD_GUID \ - .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ - 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) + .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ + 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) /* * VSS (Backup/Restore) GUID */ #define HV_VSS_GUID \ - .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ - 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) + .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ + 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) /* * Synthetic Video GUID * {DA0A7802-E377-4aac-8E77-0558EB1073F8} */ #define HV_SYNTHVID_GUID \ - .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ - 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) + .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ + 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) /* * Synthetic FC GUID * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} */ #define HV_SYNTHFC_GUID \ - .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ - 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) + .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ + 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) /* * Guest File Copy Service @@ -1291,16 +1291,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_FCOPY_GUID \ - .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ - 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) + .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ + 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) /* * NetworkDirect. This is the guest RDMA service. * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} */ #define HV_ND_GUID \ - .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ - 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) + .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ + 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) /* * PCI Express Pass Through @@ -1308,8 +1308,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_PCIE_GUID \ - .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ - 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) + .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ + 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) /* * Linux doesn't support the 3 devices: the first two are for @@ -1321,16 +1321,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); */ #define HV_AVMA1_GUID \ - .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ - 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) + .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ + 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) #define HV_AVMA2_GUID \ - .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ - 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) + .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ + 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) #define HV_RDV_GUID \ - .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ - 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) + .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ + 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) /* * Common header for Hyper-V ICs @@ -1432,7 +1432,7 @@ struct ictimesync_ref_data { struct hyperv_service_callback { u8 msg_type; char *log_msg; - uuid_le data; + guid_t data; struct vmbus_channel *channel; void (*callback)(void *context); }; @@ -1452,8 +1452,8 @@ void vmbus_setevent(struct vmbus_channel *channel); extern __u32 vmbus_proto_version; -int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, - const uuid_le *shv_host_servie_id); +int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, + 
const guid_t *shv_host_servie_id); void vmbus_set_event(struct vmbus_channel *channel); /* Get the start of the ring buffer. */ -- cgit v1.2.3-71-gd317 From 396ae57ef1ef978d1d21cdb7586ba184a3f22453 Mon Sep 17 00:00:00 2001 From: Kimberly Brown Date: Mon, 4 Feb 2019 02:13:09 -0500 Subject: Drivers: hv: vmbus: Expose counters for interrupts and full conditions Counter values for per-channel interrupts and ring buffer full conditions are useful for investigating performance. Expose counters in sysfs for 2 types of guest to host interrupts: 1) Interrupts caused by the channel's outbound ring buffer transitioning from empty to not empty 2) Interrupts caused by the channel's inbound ring buffer transitioning from full to not full while a packet is waiting for enough buffer space to become available Expose 2 counters in sysfs for the number of times that write operations encountered a full outbound ring buffer: 1) The total number of write operations that encountered a full condition 2) The number of write operations that were the first to encounter a full condition Increment the outbound full condition counters in the hv_ringbuffer_write() function because, for most drivers, a full outbound ring buffer is detected in that function. Also increment the outbound full condition counters in the set_channel_pending_send_size() function. In the hv_sock driver, a full outbound ring buffer is detected and set_channel_pending_send_size() is called before hv_ringbuffer_write() is called. I tested this patch by confirming that the sysfs files were created and observing the counter values. The values seemed to increase by a reasonable amount when the Hyper-v related drivers were in use. Signed-off-by: Kimberly Brown Reviewed-by: Michael Kelley Signed-off-by: Sasha Levin --- Documentation/ABI/stable/sysfs-bus-vmbus | 33 +++++++++++++++++++++++ drivers/hv/ring_buffer.c | 14 +++++++++- drivers/hv/vmbus_drv.c | 36 +++++++++++++++++++++++++ include/linux/hyperv.h | 46 ++++++++++++++++++++++++++++++++ 4 files changed, 128 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/Documentation/ABI/stable/sysfs-bus-vmbus b/Documentation/ABI/stable/sysfs-bus-vmbus index 3fed8fdb873d..826689dcc2e6 100644 --- a/Documentation/ABI/stable/sysfs-bus-vmbus +++ b/Documentation/ABI/stable/sysfs-bus-vmbus @@ -146,3 +146,36 @@ KernelVersion: 4.16 Contact: Stephen Hemminger Description: Binary file created by uio_hv_generic for ring buffer Users: Userspace drivers + +What: /sys/bus/vmbus/devices//channels//intr_in_full +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of guest to host interrupts caused by the inbound ring + buffer transitioning from full to not full while a packet is + waiting for buffer space to become available +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//intr_out_empty +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of guest to host interrupts caused by the outbound ring + buffer transitioning from empty to not empty +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//out_full_first +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Number of write operations that were the first to encounter an + outbound ring buffer full condition +Users: Debugging tools + +What: /sys/bus/vmbus/devices//channels//out_full_total +Date: February 2019 +KernelVersion: 5.0 +Contact: Michael Kelley +Description: Total number of write operations that encountered an outbound + ring 
buffer full condition +Users: Debugging tools diff --git a/drivers/hv/ring_buffer.c b/drivers/hv/ring_buffer.c index 1f1a55e07733..9e8b31ccc142 100644 --- a/drivers/hv/ring_buffer.c +++ b/drivers/hv/ring_buffer.c @@ -74,8 +74,10 @@ static void hv_signal_on_write(u32 old_write, struct vmbus_channel *channel) * This is the only case we need to signal when the * ring transitions from being empty to non-empty. */ - if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) + if (old_write == READ_ONCE(rbi->ring_buffer->read_index)) { + ++channel->intr_out_empty; vmbus_setevent(channel); + } } /* Get the next write location for the specified ring buffer. */ @@ -272,10 +274,19 @@ int hv_ringbuffer_write(struct vmbus_channel *channel, * is empty since the read index == write index. */ if (bytes_avail_towrite <= totalbytes_towrite) { + ++channel->out_full_total; + + if (!channel->out_full_flag) { + ++channel->out_full_first; + channel->out_full_flag = true; + } + spin_unlock_irqrestore(&outring_info->ring_lock, flags); return -EAGAIN; } + channel->out_full_flag = false; + /* Write to the ring buffer */ next_write_location = hv_get_next_write_location(outring_info); @@ -530,6 +541,7 @@ void hv_pkt_iter_close(struct vmbus_channel *channel) if (curr_write_sz <= pending_sz) return; + ++channel->intr_in_full; vmbus_setevent(channel); } EXPORT_SYMBOL_GPL(hv_pkt_iter_close); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 126c2de39e35..1264b17e7e9d 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1484,6 +1484,38 @@ static ssize_t channel_events_show(const struct vmbus_channel *channel, char *bu } static VMBUS_CHAN_ATTR(events, S_IRUGO, channel_events_show, NULL); +static ssize_t channel_intr_in_full_show(const struct vmbus_channel *channel, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)channel->intr_in_full); +} +static VMBUS_CHAN_ATTR(intr_in_full, 0444, channel_intr_in_full_show, NULL); + +static ssize_t channel_intr_out_empty_show(const struct vmbus_channel *channel, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)channel->intr_out_empty); +} +static VMBUS_CHAN_ATTR(intr_out_empty, 0444, channel_intr_out_empty_show, NULL); + +static ssize_t channel_out_full_first_show(const struct vmbus_channel *channel, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)channel->out_full_first); +} +static VMBUS_CHAN_ATTR(out_full_first, 0444, channel_out_full_first_show, NULL); + +static ssize_t channel_out_full_total_show(const struct vmbus_channel *channel, + char *buf) +{ + return sprintf(buf, "%llu\n", + (unsigned long long)channel->out_full_total); +} +static VMBUS_CHAN_ATTR(out_full_total, 0444, channel_out_full_total_show, NULL); + static ssize_t subchannel_monitor_id_show(const struct vmbus_channel *channel, char *buf) { @@ -1509,6 +1541,10 @@ static struct attribute *vmbus_chan_attrs[] = { &chan_attr_latency.attr, &chan_attr_interrupts.attr, &chan_attr_events.attr, + &chan_attr_intr_in_full.attr, + &chan_attr_intr_out_empty.attr, + &chan_attr_out_full_first.attr, + &chan_attr_out_full_total.attr, &chan_attr_monitor_id.attr, &chan_attr_subchannel_id.attr, NULL diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index d5678a0fe598..64698ec8f2ac 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h @@ -751,6 +751,19 @@ struct vmbus_channel { u64 interrupts; /* Host to Guest interrupts */ u64 sig_events; /* Guest to Host events */ + /* + * Guest to host interrupts caused by the outbound 
ring buffer changing + * from empty to not empty. + */ + u64 intr_out_empty; + + /* + * Indicates that a full outbound ring buffer was encountered. The flag + * is set to true when a full outbound ring buffer is encountered and + * set to false when a write to the outbound ring buffer is completed. + */ + bool out_full_flag; + /* Channel callback's invoked in softirq context */ struct tasklet_struct callback_event; void (*onchannel_callback)(void *context); @@ -903,6 +916,24 @@ struct vmbus_channel { * vmbus_connection.work_queue and hang: see vmbus_process_offer(). */ struct work_struct add_channel_work; + + /* + * Guest to host interrupts caused by the inbound ring buffer changing + * from full to not full while a packet is waiting. + */ + u64 intr_in_full; + + /* + * The total number of write operations that encountered a full + * outbound ring buffer. + */ + u64 out_full_total; + + /* + * The number of write operations that were the first to encounter a + * full outbound ring buffer. + */ + u64 out_full_first; }; static inline bool is_hvsock_channel(const struct vmbus_channel *c) @@ -936,6 +967,21 @@ static inline void *get_per_channel_state(struct vmbus_channel *c) static inline void set_channel_pending_send_size(struct vmbus_channel *c, u32 size) { + unsigned long flags; + + if (size) { + spin_lock_irqsave(&c->outbound.ring_lock, flags); + ++c->out_full_total; + + if (!c->out_full_flag) { + ++c->out_full_first; + c->out_full_flag = true; + } + spin_unlock_irqrestore(&c->outbound.ring_lock, flags); + } else { + c->out_full_flag = false; + } + c->outbound.ring_buffer->pending_send_sz = size; } -- cgit v1.2.3-71-gd317 From 625239d4ad43590f6639737ee900884f7d801411 Mon Sep 17 00:00:00 2001 From: Loys Ollivier Date: Wed, 13 Feb 2019 16:09:28 +0100 Subject: gnss: add mtk receiver type support Add an MTK (Mediatek) type to the "GNSS_TYPE" attribute. Note that MTK receivers support a subset of NMEA 0183 with vendor extensions. Signed-off-by: Loys Ollivier Signed-off-by: Johan Hovold --- drivers/gnss/core.c | 1 + include/linux/gnss.h | 1 + 2 files changed, 2 insertions(+) (limited to 'include') diff --git a/drivers/gnss/core.c b/drivers/gnss/core.c index 4291a0dd22aa..320cfca80d5f 100644 --- a/drivers/gnss/core.c +++ b/drivers/gnss/core.c @@ -334,6 +334,7 @@ static const char * const gnss_type_names[GNSS_TYPE_COUNT] = { [GNSS_TYPE_NMEA] = "NMEA", [GNSS_TYPE_SIRF] = "SiRF", [GNSS_TYPE_UBX] = "UBX", + [GNSS_TYPE_MTK] = "MTK", }; static const char *gnss_type_name(struct gnss_device *gdev) diff --git a/include/linux/gnss.h b/include/linux/gnss.h index 43546977098c..36968a0f33e8 100644 --- a/include/linux/gnss.h +++ b/include/linux/gnss.h @@ -22,6 +22,7 @@ enum gnss_type { GNSS_TYPE_NMEA = 0, GNSS_TYPE_SIRF, GNSS_TYPE_UBX, + GNSS_TYPE_MTK, GNSS_TYPE_COUNT }; -- cgit v1.2.3-71-gd317 From 8605a1366015afb0ec7dd486262d0b4088e3c75f Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Fri, 15 Feb 2019 14:04:57 +0530 Subject: drm/i915: enum port definition is moved into i915_drm.h For the reusability of the enum port in other driver modules (like mei_hdcp), enum port definition is moved from I915 local header intel_display.h to drm/i915_drm.h Signed-off-by: Ramalingam C Acked-by: Daniel Vetter [danvet: Fix subject prefix.] 
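As a rough illustration of why the relocation helps (this sketch is not part of the patch; the helper name log_hdcp_port and the use of dev_dbg() are invented for the example), a module outside i915, such as a mei_hdcp-style driver, can now name ports after including only the shared header:

#include <linux/device.h>
#include <drm/i915_drm.h>

/* Hypothetical helper in a non-i915 module such as mei_hdcp. */
static void log_hdcp_port(struct device *dev, enum port port)
{
	if (port == PORT_NONE) {
		dev_dbg(dev, "HDCP: no port selected\n");
		return;
	}

	/* port_name() maps PORT_A..PORT_F to the letters 'A'..'F'. */
	dev_dbg(dev, "HDCP: authenticating port %c\n", port_name(port));
}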
Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1550219730-17734-3-git-send-email-ramalingam.c@intel.com --- drivers/gpu/drm/i915/intel_display.h | 16 +--------------- include/drm/i915_drm.h | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 15 deletions(-) (limited to 'include') diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h index 4262452963b3..79203666fc62 100644 --- a/drivers/gpu/drm/i915/intel_display.h +++ b/drivers/gpu/drm/i915/intel_display.h @@ -26,6 +26,7 @@ #define _INTEL_DISPLAY_H_ #include +#include enum i915_gpio { GPIOA, @@ -150,21 +151,6 @@ enum plane_id { for ((__p) = PLANE_PRIMARY; (__p) < I915_MAX_PLANES; (__p)++) \ for_each_if((__crtc)->plane_ids_mask & BIT(__p)) -enum port { - PORT_NONE = -1, - - PORT_A = 0, - PORT_B, - PORT_C, - PORT_D, - PORT_E, - PORT_F, - - I915_MAX_PORTS -}; - -#define port_name(p) ((p) + 'A') - /* * Ports identifier referenced from other drivers. * Expected to remain stable over time diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h index c44703f471b3..7523e9a7b6e2 100644 --- a/include/drm/i915_drm.h +++ b/include/drm/i915_drm.h @@ -100,4 +100,19 @@ extern struct resource intel_graphics_stolen_res; #define INTEL_GEN11_BSM_DW1 0xc4 #define INTEL_BSM_MASK (-(1u << 20)) +enum port { + PORT_NONE = -1, + + PORT_A = 0, + PORT_B, + PORT_C, + PORT_D, + PORT_E, + PORT_F, + + I915_MAX_PORTS +}; + +#define port_name(p) ((p) + 'A') + #endif /* _I915_DRM_H_ */ -- cgit v1.2.3-71-gd317 From 1626eab70ebc61d015e69a4bc3479d9228539343 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Fri, 15 Feb 2019 14:04:58 +0530 Subject: drm/i915: header for i915 - MEI_HDCP interface Header defines the interface for the I915 and MEI_HDCP drivers. This interface is specific to the usage of mei_hdcp from gen9+ platforms for ME FW based HDCP2.2 services. And Generic HDCP2.2 protocol specific definitions are added at drm/drm_hdcp.h. v2: Commit msg is enhanced [Daniel] v3: i915_hdcp_comp_master is defined. Signed-off-by: Ramalingam C Reviewed-by: Daniel Vetter [v2] Reviewed-by: Uma Shankar [v2] [danvet: Fix subject to drm/i915.] 
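To show how the interface defined below is meant to be consumed, here is a minimal caller-side sketch. It is not taken from the patch: the wrapper name start_hdcp2_session is invented and error handling is simplified. The idea is that i915 holds the comp_master mutex and checks that mei_hdcp has bound (ops non-NULL) before calling through the ops table:

#include <linux/mutex.h>
#include <drm/i915_mei_hdcp_interface.h>

static int start_hdcp2_session(struct i915_hdcp_comp_master *comp,
			       struct hdcp_port_data *data,
			       struct hdcp2_ake_init *ake_data)
{
	int ret = -EINVAL;

	mutex_lock(&comp->mutex);
	/* ops is populated by mei_hdcp when the component binds. */
	if (comp->ops && comp->ops->initiate_hdcp2_session)
		ret = comp->ops->initiate_hdcp2_session(comp->mei_dev,
							data, ake_data);
	mutex_unlock(&comp->mutex);

	return ret;
}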
Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1550219730-17734-4-git-send-email-ramalingam.c@intel.com --- include/drm/i915_mei_hdcp_interface.h | 149 ++++++++++++++++++++++++++++++++++ 1 file changed, 149 insertions(+) create mode 100644 include/drm/i915_mei_hdcp_interface.h (limited to 'include') diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h new file mode 100644 index 000000000000..8c344255146a --- /dev/null +++ b/include/drm/i915_mei_hdcp_interface.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: (GPL-2.0+) */ +/* + * Copyright © 2017-2018 Intel Corporation + * + * Authors: + * Ramalingam C + */ + +#ifndef _I915_MEI_HDCP_INTERFACE_H_ +#define _I915_MEI_HDCP_INTERFACE_H_ + +#include +#include +#include +#include + +/** + * enum hdcp_port_type - HDCP port implementation type defined by ME FW + * @HDCP_PORT_TYPE_INVALID: Invalid hdcp port type + * @HDCP_PORT_TYPE_INTEGRATED: In-Host HDCP2.x port + * @HDCP_PORT_TYPE_LSPCON: HDCP2.2 discrete wired Tx port with LSPCON + * (HDMI 2.0) solution + * @HDCP_PORT_TYPE_CPDP: HDCP2.2 discrete wired Tx port using the CPDP (DP 1.3) + * solution + */ +enum hdcp_port_type { + HDCP_PORT_TYPE_INVALID, + HDCP_PORT_TYPE_INTEGRATED, + HDCP_PORT_TYPE_LSPCON, + HDCP_PORT_TYPE_CPDP +}; + +/** + * enum hdcp_wired_protocol - HDCP adaptation used on the port + * @HDCP_PROTOCOL_INVALID: Invalid HDCP adaptation protocol + * @HDCP_PROTOCOL_HDMI: HDMI adaptation of HDCP used on the port + * @HDCP_PROTOCOL_DP: DP adaptation of HDCP used on the port + */ +enum hdcp_wired_protocol { + HDCP_PROTOCOL_INVALID, + HDCP_PROTOCOL_HDMI, + HDCP_PROTOCOL_DP +}; + +/** + * struct hdcp_port_data - intel specific HDCP port data + * @port: port index as per I915 + * @port_type: HDCP port type as per ME FW classification + * @protocol: HDCP adaptation as per ME FW + * @k: No of streams transmitted on a port. Only on DP MST this is != 1 + * @seq_num_m: Count of RepeaterAuth_Stream_Manage msg propagated. + * Initialized to 0 on AKE_INIT. Incremented after every successful + * transmission of RepeaterAuth_Stream_Manage message. When it rolls + * over re-Auth has to be triggered. + * @streams: struct hdcp2_streamid_type[k]. Defines the type and id for the + * streams + */ +struct hdcp_port_data { + enum port port; + u8 port_type; + u8 protocol; + u16 k; + u32 seq_num_m; + struct hdcp2_streamid_type *streams; +}; + +/** + * struct i915_hdcp_component_ops- ops for HDCP2.2 services. + * @owner: Module providing the ops + * @initiate_hdcp2_session: Initiate a Wired HDCP2.2 Tx Session. + * And Prepare AKE_Init. + * @verify_receiver_cert_prepare_km: Verify the Receiver Certificate + * AKE_Send_Cert and prepare + AKE_Stored_Km/AKE_No_Stored_Km + * @verify_hprime: Verify AKE_Send_H_prime + * @store_pairing_info: Store pairing info received + * @initiate_locality_check: Prepare LC_Init + * @verify_lprime: Verify lprime + * @get_session_key: Prepare SKE_Send_Eks + * @repeater_check_flow_prepare_ack: Validate the Downstream topology + * and prepare rep_ack + * @verify_mprime: Verify mprime + * @enable_hdcp_authentication: Mark a port as authenticated. + * @close_hdcp_session: Close the Wired HDCP Tx session per port. + * This also disables the authenticated state of the port. 
+ */ +struct i915_hdcp_component_ops { + /** + * @owner: mei_hdcp module + */ + struct module *owner; + + int (*initiate_hdcp2_session)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_init *ake_data); + int (*verify_receiver_cert_prepare_km)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_cert + *rx_cert, + bool *km_stored, + struct hdcp2_ake_no_stored_km + *ek_pub_km, + size_t *msg_sz); + int (*verify_hprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_hprime *rx_hprime); + int (*store_pairing_info)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ake_send_pairing_info + *pairing_info); + int (*initiate_locality_check)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_init *lc_init_data); + int (*verify_lprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_lc_send_lprime *rx_lprime); + int (*get_session_key)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_ske_send_eks *ske_data); + int (*repeater_check_flow_prepare_ack)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_send_receiverid_list + *rep_topology, + struct hdcp2_rep_send_ack + *rep_send_ack); + int (*verify_mprime)(struct device *dev, + struct hdcp_port_data *data, + struct hdcp2_rep_stream_ready *stream_ready); + int (*enable_hdcp_authentication)(struct device *dev, + struct hdcp_port_data *data); + int (*close_hdcp_session)(struct device *dev, + struct hdcp_port_data *data); +}; + +/** + * struct i915_hdcp_component_master - Used for communication between i915 + * and mei_hdcp drivers for the HDCP2.2 services + * @mei_dev: device that provide the HDCP2.2 service from MEI Bus. + * @hdcp_ops: Ops implemented by mei_hdcp driver, used by i915 driver. + */ +struct i915_hdcp_comp_master { + struct device *mei_dev; + const struct i915_hdcp_component_ops *ops; + + /* To protect the above members. */ + struct mutex mutex; +}; + +#endif /* _I915_MEI_HDCP_INTERFACE_H_ */ -- cgit v1.2.3-71-gd317 From 1bf7cb4d8f3d1f7388b230903cae35dfb43901d3 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Fri, 15 Feb 2019 14:05:00 +0530 Subject: drm/i915: MEI interface definition Defining the mei-i915 interface functions and initialization of the interface. v2: Adjust to the new interface changes. [Tomas] Added further debug logs for the failures at MEI i/f. port in hdcp_port data is equipped to handle -ve values. v3: mei comp is matched for global i915 comp master. [Daniel] In hdcp_shim hdcp_protocol() is replaced with const variable. [Daniel] mei wrappers are adjusted as per the i/f change [Daniel] v4: port initialization is done only at hdcp2_init only [Danvet] v5: I915 registers a subcomponent to be matched with mei_hdcp [Daniel] v6: HDCP_disable for all connectors incase of comp_unbind. Tear down HDCP comp interface at i915_unload [Daniel] v7: Component init and fini are moved out of connector ops [Daniel] hdcp_disable is not called from unbind. [Daniel] Signed-off-by: Ramalingam C Reviewed-by: Daniel Vetter [v11] [danvet: For the topic/mei-hdcp shared branch drop everything but the header change needed by both drm/i915 and mei-hdcp. Also drop the no longer needed device.h include.] 
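For context on how the new enumerator below is used, a hedged sketch follows. It is not part of this patch: it assumes the component framework's typed-registration helper from the same development era, and mei_hdcp_component_register is an invented name. The provider side registers its device under the agreed subcomponent type, which i915 then matches against with the same value:

#include <linux/component.h>
#include <drm/i915_component.h>

/*
 * In the mei_hdcp driver: publish this device as the HDCP
 * subcomponent. i915 builds a typed component match against the
 * same I915_COMPONENT_HDCP value to bind the two drivers together.
 */
static int mei_hdcp_component_register(struct device *dev,
				       const struct component_ops *ops)
{
	return component_add_typed(dev, ops, I915_COMPONENT_HDCP);
}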
Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1550219730-17734-6-git-send-email-ramalingam.c@intel.com --- include/drm/i915_component.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include') diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h index 72fbb037f9b3..dcb95bd9dee6 100644 --- a/include/drm/i915_component.h +++ b/include/drm/i915_component.h @@ -28,6 +28,7 @@ enum i915_component_type { I915_COMPONENT_AUDIO = 1, + I915_COMPONENT_HDCP, }; /* MAX_PORT is the number of port -- cgit v1.2.3-71-gd317 From 32097060189bf215439e719c5df514399235c52e Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Fri, 15 Feb 2019 14:05:04 +0530 Subject: drm: helper functions for hdcp2 seq_num to from u32 Library functions for endianness are aligned for 16/32/64 bits. But hdcp sequence numbers are 24bits(big endian). So for their conversion to and from u32 helper functions are developed. v2: Comment is updated. [Daniel] Reviewed-by Uma. Signed-off-by: Ramalingam C Reviewed-by: Daniel Vetter Reviewed-by: Uma Shankar Acked-by: Dave Airlie Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/1550219730-17734-10-git-send-email-ramalingam.c@intel.com --- include/drm/drm_hdcp.h | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) (limited to 'include') diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index a6de09c5e47f..c21682f76cd3 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -250,4 +250,22 @@ struct hdcp2_dp_errata_stream_type { #define HDCP_2_2_HDMI_RXSTATUS_READY(x) ((x) & BIT(2)) #define HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(x) ((x) & BIT(3)) +/* + * Helper functions to convert 24bit big endian hdcp sequence number to + * host format and back + */ +static inline +u32 drm_hdcp2_seq_num_to_u32(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN]) +{ + return (u32)(seq_num[2] | seq_num[1] << 8 | seq_num[0] << 16); +} + +static inline +void drm_hdcp2_u32_to_seq_num(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val) +{ + seq_num[0] = val >> 16; + seq_num[1] = val >> 8; + seq_num[2] = val; +} + #endif -- cgit v1.2.3-71-gd317 From 99b9d7b4970cf131fd17a8f4ad4870049bd7a365 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sat, 16 Feb 2019 00:39:13 +0200 Subject: habanalabs: add basic Goya support This patch adds a basic support for the Goya device. The code initializes the device's PCI controller and PCI bars. It also initializes various S/W structures and adds some basic helper functions. 
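Before the diff, a brief sketch of the dispatch pattern this patch establishes (the sketch itself is illustrative, not code from the patch): each ASIC fills a struct hl_asic_funcs during early driver setup, and common code reaches hardware-specific behavior only through that table, NULL-checking the hooks it knows may be absent:

#include "habanalabs.h"

static int example_device_bringup(struct hl_device *hdev)
{
	int rc;

	/* Set by goya_set_asic_funcs() when asic_type == ASIC_GOYA. */
	rc = hdev->asic_funcs->early_init(hdev);
	if (rc)
		return rc;

	rc = hdev->asic_funcs->sw_init(hdev);
	if (rc) {
		/* Optional hooks are NULL-checked before use. */
		if (hdev->asic_funcs->early_fini)
			hdev->asic_funcs->early_fini(hdev);
		return rc;
	}

	return 0;
}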
Reviewed-by: Mike Rapoport Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/Makefile | 3 + drivers/misc/habanalabs/device.c | 71 ++++ drivers/misc/habanalabs/goya/Makefile | 3 + drivers/misc/habanalabs/goya/goya.c | 632 ++++++++++++++++++++++++++++ drivers/misc/habanalabs/goya/goyaP.h | 152 +++++++ drivers/misc/habanalabs/habanalabs.h | 137 ++++++ drivers/misc/habanalabs/habanalabs_drv.c | 3 + drivers/misc/habanalabs/include/goya/goya.h | 45 ++ include/uapi/misc/habanalabs.h | 20 + 9 files changed, 1066 insertions(+) create mode 100644 drivers/misc/habanalabs/goya/Makefile create mode 100644 drivers/misc/habanalabs/goya/goya.c create mode 100644 drivers/misc/habanalabs/goya/goyaP.h create mode 100644 drivers/misc/habanalabs/include/goya/goya.h create mode 100644 include/uapi/misc/habanalabs.h (limited to 'include') diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile index 0910f7fa34ec..6f1ead69bd77 100644 --- a/drivers/misc/habanalabs/Makefile +++ b/drivers/misc/habanalabs/Makefile @@ -5,3 +5,6 @@ obj-m := habanalabs.o habanalabs-y := habanalabs_drv.o device.o + +include $(src)/goya/Makefile +habanalabs-y += $(HL_GOYA_FILES) diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 6189fd0e2ccd..a4feaa784db3 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -120,8 +120,11 @@ err_cdev_add: */ static int device_early_init(struct hl_device *hdev) { + int rc; + switch (hdev->asic_type) { case ASIC_GOYA: + goya_set_asic_funcs(hdev); strlcpy(hdev->asic_name, "GOYA", sizeof(hdev->asic_name)); break; default: @@ -130,6 +133,10 @@ static int device_early_init(struct hl_device *hdev) return -EINVAL; } + rc = hdev->asic_funcs->early_init(hdev); + if (rc) + return rc; + return 0; } @@ -141,6 +148,10 @@ static int device_early_init(struct hl_device *hdev) */ static void device_early_fini(struct hl_device *hdev) { + + if (hdev->asic_funcs->early_fini) + hdev->asic_funcs->early_fini(hdev); + } /* @@ -154,8 +165,15 @@ static void device_early_fini(struct hl_device *hdev) */ int hl_device_suspend(struct hl_device *hdev) { + int rc; + pci_save_state(hdev->pdev); + rc = hdev->asic_funcs->suspend(hdev); + if (rc) + dev_err(hdev->dev, + "Failed to disable PCI access of device CPU\n"); + /* Shut down the device */ pci_disable_device(hdev->pdev); pci_set_power_state(hdev->pdev, PCI_D3hot); @@ -185,6 +203,13 @@ int hl_device_resume(struct hl_device *hdev) return rc; } + rc = hdev->asic_funcs->resume(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to enable PCI access from device CPU\n"); + return rc; + } + return 0; } @@ -212,11 +237,21 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) if (rc) goto release_device; + /* + * Start calling ASIC initialization. First S/W then H/W and finally + * late init + */ + rc = hdev->asic_funcs->sw_init(hdev); + if (rc) + goto early_fini; + dev_notice(hdev->dev, "Successfully added device to habanalabs driver\n"); return 0; +early_fini: + device_early_fini(hdev); release_device: device_destroy(hclass, hdev->dev->devt); cdev_del(&hdev->cdev); @@ -247,6 +282,9 @@ void hl_device_fini(struct hl_device *hdev) /* Mark device as disabled */ hdev->disabled = true; + /* Call ASIC S/W finalize function */ + hdev->asic_funcs->sw_fini(hdev); + device_early_fini(hdev); /* Hide device from user */ @@ -338,3 +376,36 @@ int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr, return *val ? 
0 : -ETIMEDOUT; } + +/* + * MMIO register access helper functions. + */ + +/* + * hl_rreg - Read an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * + * Returns the value of the MMIO register we are asked to read + * + */ +inline u32 hl_rreg(struct hl_device *hdev, u32 reg) +{ + return readl(hdev->rmmio + reg); +} + +/* + * hl_wreg - Write to an MMIO register + * + * @hdev: pointer to habanalabs device structure + * @reg: MMIO register offset (in bytes) + * @val: 32-bit value + * + * Writes the 32-bit value into the MMIO register + * + */ +inline void hl_wreg(struct hl_device *hdev, u32 reg, u32 val) +{ + writel(val, hdev->rmmio + reg); +} diff --git a/drivers/misc/habanalabs/goya/Makefile b/drivers/misc/habanalabs/goya/Makefile new file mode 100644 index 000000000000..38d43006386d --- /dev/null +++ b/drivers/misc/habanalabs/goya/Makefile @@ -0,0 +1,3 @@ +subdir-ccflags-y += -I$(src) + +HL_GOYA_FILES := goya/goya.o diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c new file mode 100644 index 000000000000..ceaffa9afd83 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goya.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "goyaP.h" +#include "include/goya/asic_reg/goya_masks.h" + +#include +#include +#include + +/* + * GOYA security scheme: + * + * 1. Host is protected by: + * - Range registers (When MMU is enabled, DMA RR does NOT protect host) + * - MMU + * + * 2. DRAM is protected by: + * - Range registers (protect the first 512MB) + * - MMU (isolation between users) + * + * 3. Configuration is protected by: + * - Range registers + * - Protection bits + * + * When MMU is disabled: + * + * QMAN DMA: PQ, CQ, CP, DMA are secured. + * PQ, CB and the data are on the host. + * + * QMAN TPC/MME: + * PQ, CQ and CP are not secured. + * PQ, CB and the data are on the SRAM/DRAM. + * + * Since QMAN DMA is secured, KMD is parsing the DMA CB: + * - KMD checks DMA pointer + * - WREG, MSG_PROT are not allowed. + * - MSG_LONG/SHORT are allowed. + * + * A read/write transaction by the QMAN to a protected area will succeed if + * and only if the QMAN's CP is secured and MSG_PROT is used + * + * + * When MMU is enabled: + * + * QMAN DMA: PQ, CQ and CP are secured. + * MMU is set to bypass on the Secure props register of the QMAN. + * The reasons we don't enable MMU for PQ, CQ and CP are: + * - PQ entry is in kernel address space and KMD doesn't map it. + * - CP writes to MSIX register and to kernel address space (completion + * queue). + * + * DMA is not secured but because CP is secured, KMD still needs to parse the + * CB, but doesn't need to check the DMA addresses. + * + * For QMAN DMA 0, DMA is also secured because only KMD uses this DMA and KMD + * doesn't map memory in MMU. 
+ * + * QMAN TPC/MME: PQ, CQ and CP aren't secured (no change from MMU disabled mode) + * + * DMA RR does NOT protect host because DMA is not secured + * + */ + +#define GOYA_MMU_REGS_NUM 61 + +#define GOYA_DMA_POOL_BLK_SIZE 0x100 /* 256 bytes */ + +#define GOYA_RESET_TIMEOUT_MSEC 500 /* 500ms */ +#define GOYA_PLDM_RESET_TIMEOUT_MSEC 20000 /* 20s */ +#define GOYA_RESET_WAIT_MSEC 1 /* 1ms */ +#define GOYA_CPU_RESET_WAIT_MSEC 100 /* 100ms */ +#define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ +#define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */ +#define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ + +#define GOYA_QMAN0_FENCE_VAL 0xD169B243 + +#define GOYA_MAX_INITIATORS 20 + +static void goya_get_fixed_properties(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + + prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; + + prop->dram_base_address = DRAM_PHYS_BASE; + prop->dram_size = DRAM_PHYS_DEFAULT_SIZE; + prop->dram_end_address = prop->dram_base_address + prop->dram_size; + prop->dram_user_base_address = DRAM_BASE_ADDR_USER; + + prop->sram_base_address = SRAM_BASE_ADDR; + prop->sram_size = SRAM_SIZE; + prop->sram_end_address = prop->sram_base_address + prop->sram_size; + prop->sram_user_base_address = prop->sram_base_address + + SRAM_USER_BASE_OFFSET; + + prop->host_phys_base_address = HOST_PHYS_BASE; + prop->va_space_host_start_address = VA_HOST_SPACE_START; + prop->va_space_host_end_address = VA_HOST_SPACE_END; + prop->va_space_dram_start_address = VA_DDR_SPACE_START; + prop->va_space_dram_end_address = VA_DDR_SPACE_END; + prop->cfg_size = CFG_SIZE; + prop->max_asid = MAX_ASID; + prop->tpc_enabled_mask = TPC_ENABLED_MASK; + + prop->high_pll = PLL_HIGH_DEFAULT; +} + +/* + * goya_pci_bars_map - Map PCI BARS of Goya device + * + * @hdev: pointer to hl_device structure + * + * Request PCI regions and map them to kernel virtual addresses. 
+ * Returns 0 on success + * + */ +int goya_pci_bars_map(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int rc; + + rc = pci_request_regions(pdev, HL_NAME); + if (rc) { + dev_err(hdev->dev, "Cannot obtain PCI resources\n"); + return rc; + } + + hdev->pcie_bar[SRAM_CFG_BAR_ID] = + pci_ioremap_bar(pdev, SRAM_CFG_BAR_ID); + if (!hdev->pcie_bar[SRAM_CFG_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for CFG\n"); + rc = -ENODEV; + goto err_release_regions; + } + + hdev->pcie_bar[MSIX_BAR_ID] = pci_ioremap_bar(pdev, MSIX_BAR_ID); + if (!hdev->pcie_bar[MSIX_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for MSIX\n"); + rc = -ENODEV; + goto err_unmap_sram_cfg; + } + + hdev->pcie_bar[DDR_BAR_ID] = pci_ioremap_wc_bar(pdev, DDR_BAR_ID); + if (!hdev->pcie_bar[DDR_BAR_ID]) { + dev_err(hdev->dev, "pci_ioremap_bar failed for DDR\n"); + rc = -ENODEV; + goto err_unmap_msix; + } + + hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + + (CFG_BASE - SRAM_BASE_ADDR); + + return 0; + +err_unmap_msix: + iounmap(hdev->pcie_bar[MSIX_BAR_ID]); +err_unmap_sram_cfg: + iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]); +err_release_regions: + pci_release_regions(pdev); + + return rc; +} + +/* + * goya_pci_bars_unmap - Unmap PCI BARS of Goya device + * + * @hdev: pointer to hl_device structure + * + * Release all PCI BARS and unmap their virtual addresses + * + */ +static void goya_pci_bars_unmap(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + + iounmap(hdev->pcie_bar[DDR_BAR_ID]); + iounmap(hdev->pcie_bar[MSIX_BAR_ID]); + iounmap(hdev->pcie_bar[SRAM_CFG_BAR_ID]); + pci_release_regions(pdev); +} + +/* + * goya_elbi_write - Write through the ELBI interface + * + * @hdev: pointer to hl_device structure + * + * return 0 on success, -1 on failure + * + */ +static int goya_elbi_write(struct hl_device *hdev, u64 addr, u32 data) +{ + struct pci_dev *pdev = hdev->pdev; + ktime_t timeout; + u32 val; + + /* Clear previous status */ + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, 0); + + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_ADDR, (u32) addr); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_DATA, data); + pci_write_config_dword(pdev, mmPCI_CONFIG_ELBI_CTRL, + PCI_CONFIG_ELBI_CTRL_WRITE); + + timeout = ktime_add_ms(ktime_get(), 10); + for (;;) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, &val); + if (val & PCI_CONFIG_ELBI_STS_MASK) + break; + if (ktime_compare(ktime_get(), timeout) > 0) { + pci_read_config_dword(pdev, mmPCI_CONFIG_ELBI_STS, + &val); + break; + } + usleep_range(300, 500); + } + + if ((val & PCI_CONFIG_ELBI_STS_MASK) == PCI_CONFIG_ELBI_STS_DONE) + return 0; + + if (val & PCI_CONFIG_ELBI_STS_ERR) { + dev_err(hdev->dev, "Error writing to ELBI\n"); + return -EIO; + } + + if (!(val & PCI_CONFIG_ELBI_STS_MASK)) { + dev_err(hdev->dev, "ELBI write didn't finish in time\n"); + return -EIO; + } + + dev_err(hdev->dev, "ELBI write has undefined bits in status\n"); + return -EIO; +} + +/* + * goya_iatu_write - iatu write routine + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_iatu_write(struct hl_device *hdev, u32 addr, u32 data) +{ + u32 dbi_offset; + int rc; + + dbi_offset = addr & 0xFFF; + + rc = goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0x00300000); + rc |= goya_elbi_write(hdev, mmPCIE_DBI_BASE + dbi_offset, data); + + if (rc) + return -EIO; + + return 0; +} + +void goya_reset_link_through_bridge(struct hl_device *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct pci_dev *parent_port; + u16 val; + + parent_port = 
pdev->bus->self; + pci_read_config_word(parent_port, PCI_BRIDGE_CONTROL, &val); + val |= PCI_BRIDGE_CTL_BUS_RESET; + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(1); + + val &= ~(PCI_BRIDGE_CTL_BUS_RESET); + pci_write_config_word(parent_port, PCI_BRIDGE_CONTROL, val); + ssleep(3); +} + +/* + * goya_set_ddr_bar_base - set DDR bar to map specific device address + * + * @hdev: pointer to hl_device structure + * @addr: address in DDR. Must be aligned to DDR bar size + * + * This function configures the iATU so that the DDR bar will start at the + * specified addr. + * + */ +static int goya_set_ddr_bar_base(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + int rc; + + if ((goya) && (goya->ddr_bar_cur_addr == addr)) + return 0; + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + rc = goya_iatu_write(hdev, 0x314, lower_32_bits(addr)); + rc |= goya_iatu_write(hdev, 0x318, upper_32_bits(addr)); + rc |= goya_iatu_write(hdev, 0x300, 0); + /* Enable + Bar match + match enable + Bar 4 */ + rc |= goya_iatu_write(hdev, 0x304, 0xC0080400); + + /* Return the DBI window to the default location */ + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0); + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0); + + if (rc) { + dev_err(hdev->dev, "failed to map DDR bar to 0x%08llx\n", addr); + return -EIO; + } + + if (goya) + goya->ddr_bar_cur_addr = addr; + + return 0; +} + +/* + * goya_init_iatu - Initialize the iATU unit inside the PCI controller + * + * @hdev: pointer to hl_device structure + * + * This is needed in case the firmware doesn't initialize the iATU + * + */ +static int goya_init_iatu(struct hl_device *hdev) +{ + int rc; + + /* Inbound Region 0 - Bar 0 - Point to SRAM_BASE_ADDR */ + rc = goya_iatu_write(hdev, 0x114, lower_32_bits(SRAM_BASE_ADDR)); + rc |= goya_iatu_write(hdev, 0x118, upper_32_bits(SRAM_BASE_ADDR)); + rc |= goya_iatu_write(hdev, 0x100, 0); + /* Enable + Bar match + match enable */ + rc |= goya_iatu_write(hdev, 0x104, 0xC0080000); + + /* Inbound Region 1 - Bar 4 - Point to DDR */ + rc |= goya_set_ddr_bar_base(hdev, DRAM_PHYS_BASE); + + /* Outbound Region 0 - Point to Host */ + rc |= goya_iatu_write(hdev, 0x008, lower_32_bits(HOST_PHYS_BASE)); + rc |= goya_iatu_write(hdev, 0x00C, upper_32_bits(HOST_PHYS_BASE)); + rc |= goya_iatu_write(hdev, 0x010, + lower_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1)); + rc |= goya_iatu_write(hdev, 0x014, 0); + rc |= goya_iatu_write(hdev, 0x018, 0); + rc |= goya_iatu_write(hdev, 0x020, + upper_32_bits(HOST_PHYS_BASE + HOST_PHYS_SIZE - 1)); + /* Increase region size */ + rc |= goya_iatu_write(hdev, 0x000, 0x00002000); + /* Enable */ + rc |= goya_iatu_write(hdev, 0x004, 0x80000000); + + /* Return the DBI window to the default location */ + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI, 0); + rc |= goya_elbi_write(hdev, CFG_BASE + mmPCIE_AUX_DBI_32, 0); + + if (rc) + return -EIO; + + return 0; +} + +/* + * goya_early_init - GOYA early initialization code + * + * @hdev: pointer to hl_device structure + * + * Verify PCI bars + * Set DMA masks + * PCI controller initialization + * Map PCI bars + * + */ +static int goya_early_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct pci_dev *pdev = hdev->pdev; + u32 val; + int rc; + + goya_get_fixed_properties(hdev); + + /* Check BAR sizes */ + if (pci_resource_len(pdev, SRAM_CFG_BAR_ID) != CFG_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? 
BAR %d size %llu, expecting %llu\n", + SRAM_CFG_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + SRAM_CFG_BAR_ID), + CFG_BAR_SIZE); + return -ENODEV; + } + + if (pci_resource_len(pdev, MSIX_BAR_ID) != MSIX_BAR_SIZE) { + dev_err(hdev->dev, + "Not " HL_NAME "? BAR %d size %llu, expecting %llu\n", + MSIX_BAR_ID, + (unsigned long long) pci_resource_len(pdev, + MSIX_BAR_ID), + MSIX_BAR_SIZE); + return -ENODEV; + } + + prop->dram_pci_bar_size = pci_resource_len(pdev, DDR_BAR_ID); + + /* set DMA mask for GOYA */ + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(39)); + if (rc) { + dev_warn(hdev->dev, "Unable to set pci dma mask to 39 bits\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci dma mask to 32 bits\n"); + return rc; + } + } + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39)); + if (rc) { + dev_warn(hdev->dev, + "Unable to set pci consistent dma mask to 39 bits\n"); + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) { + dev_err(hdev->dev, + "Unable to set pci consistent dma mask to 32 bits\n"); + return rc; + } + } + + if (hdev->reset_pcilink) + goya_reset_link_through_bridge(hdev); + + rc = pci_enable_device_mem(pdev); + if (rc) { + dev_err(hdev->dev, "can't enable PCI device\n"); + return rc; + } + + pci_set_master(pdev); + + rc = goya_init_iatu(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize iATU\n"); + goto disable_device; + } + + rc = goya_pci_bars_map(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize PCI BARS\n"); + goto disable_device; + } + + val = RREG32(mmPSOC_GLOBAL_CONF_BOOT_STRAP_PINS); + if (val & PSOC_GLOBAL_CONF_BOOT_STRAP_PINS_SRIOV_EN_MASK) + dev_warn(hdev->dev, + "PCI strap is not configured correctly, PCI bus errors may occur\n"); + + return 0; + +disable_device: + pci_clear_master(pdev); + pci_disable_device(pdev); + + return rc; +} + +/* + * goya_early_fini - GOYA early finalization code + * + * @hdev: pointer to hl_device structure + * + * Unmap PCI bars + * + */ +int goya_early_fini(struct hl_device *hdev) +{ + goya_pci_bars_unmap(hdev); + + pci_clear_master(hdev->pdev); + pci_disable_device(hdev->pdev); + + return 0; +} + +/* + * goya_sw_init - Goya software initialization code + * + * @hdev: pointer to hl_device structure + * + */ +static int goya_sw_init(struct hl_device *hdev) +{ + struct goya_device *goya; + int rc; + + /* Allocate device structure */ + goya = kzalloc(sizeof(*goya), GFP_KERNEL); + if (!goya) + return -ENOMEM; + + /* according to goya_init_iatu */ + goya->ddr_bar_cur_addr = DRAM_PHYS_BASE; + hdev->asic_specific = goya; + + /* Create DMA pool for small allocations */ + hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), + &hdev->pdev->dev, GOYA_DMA_POOL_BLK_SIZE, 8, 0); + if (!hdev->dma_pool) { + dev_err(hdev->dev, "failed to create DMA pool\n"); + rc = -ENOMEM; + goto free_goya_device; + } + + hdev->cpu_accessible_dma_mem = + hdev->asic_funcs->dma_alloc_coherent(hdev, + CPU_ACCESSIBLE_MEM_SIZE, + &hdev->cpu_accessible_dma_address, + GFP_KERNEL | __GFP_ZERO); + + if (!hdev->cpu_accessible_dma_mem) { + dev_err(hdev->dev, + "failed to allocate %d of dma memory for CPU accessible memory space\n", + CPU_ACCESSIBLE_MEM_SIZE); + rc = -ENOMEM; + goto free_dma_pool; + } + + hdev->cpu_accessible_dma_pool = gen_pool_create(CPU_PKT_SHIFT, -1); + if (!hdev->cpu_accessible_dma_pool) { + dev_err(hdev->dev, + "Failed to create CPU accessible DMA pool\n"); + rc = -ENOMEM; + goto free_cpu_pq_dma_mem; + } + + rc = 
gen_pool_add(hdev->cpu_accessible_dma_pool, + (uintptr_t) hdev->cpu_accessible_dma_mem, + CPU_ACCESSIBLE_MEM_SIZE, -1); + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to CPU accessible DMA pool\n"); + rc = -EFAULT; + goto free_cpu_pq_pool; + } + + spin_lock_init(&goya->hw_queues_lock); + + return 0; + +free_cpu_pq_pool: + gen_pool_destroy(hdev->cpu_accessible_dma_pool); +free_cpu_pq_dma_mem: + hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); +free_dma_pool: + dma_pool_destroy(hdev->dma_pool); +free_goya_device: + kfree(goya); + + return rc; +} + +/* + * goya_sw_fini - Goya software tear-down code + * + * @hdev: pointer to hl_device structure + * + */ +int goya_sw_fini(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + gen_pool_destroy(hdev->cpu_accessible_dma_pool); + + hdev->asic_funcs->dma_free_coherent(hdev, CPU_ACCESSIBLE_MEM_SIZE, + hdev->cpu_accessible_dma_mem, + hdev->cpu_accessible_dma_address); + + dma_pool_destroy(hdev->dma_pool); + + kfree(goya); + + return 0; +} + +int goya_suspend(struct hl_device *hdev) +{ + return 0; +} + +int goya_resume(struct hl_device *hdev) +{ + return 0; +} + +void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flags) +{ + return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags); +} + +void goya_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr, + dma_addr_t dma_handle) +{ + dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle); +} + +static const struct hl_asic_funcs goya_funcs = { + .early_init = goya_early_init, + .early_fini = goya_early_fini, + .sw_init = goya_sw_init, + .sw_fini = goya_sw_fini, + .suspend = goya_suspend, + .resume = goya_resume, + .dma_alloc_coherent = goya_dma_alloc_coherent, + .dma_free_coherent = goya_dma_free_coherent, +}; + +/* + * goya_set_asic_funcs - set Goya function pointers + * + * @*hdev: pointer to hl_device structure + * + */ +void goya_set_asic_funcs(struct hl_device *hdev) +{ + hdev->asic_funcs = &goya_funcs; +} diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h new file mode 100644 index 000000000000..6a78976e2098 --- /dev/null +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -0,0 +1,152 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYAP_H_ +#define GOYAP_H_ + +#include +#include "habanalabs.h" +#include "include/goya/goya.h" + +#define NUMBER_OF_CMPLT_QUEUES 5 +#define NUMBER_OF_EXT_HW_QUEUES 5 +#define NUMBER_OF_CPU_HW_QUEUES 1 +#define NUMBER_OF_INT_HW_QUEUES 9 +#define NUMBER_OF_HW_QUEUES (NUMBER_OF_EXT_HW_QUEUES + \ + NUMBER_OF_CPU_HW_QUEUES + \ + NUMBER_OF_INT_HW_QUEUES) + +/* + * Number of MSIX interrupts IDS: + * Each completion queue has 1 ID + * The event queue has 1 ID + */ +#define NUMBER_OF_INTERRUPTS (NUMBER_OF_CMPLT_QUEUES + 1) + +#if (NUMBER_OF_HW_QUEUES >= HL_MAX_QUEUES) +#error "Number of H/W queues must be smaller than HL_MAX_QUEUES" +#endif + +#if (NUMBER_OF_INTERRUPTS > GOYA_MSIX_ENTRIES) +#error "Number of MSIX interrupts must be smaller or equal to GOYA_MSIX_ENTRIES" +#endif + +#define QMAN_FENCE_TIMEOUT_USEC 10000 /* 10 ms */ + +#define QMAN_STOP_TIMEOUT_USEC 100000 /* 100 ms */ + +#define TPC_ENABLED_MASK 0xFF + +#define PLL_HIGH_DEFAULT 1575000000 /* 1.575 GHz */ + +#define GOYA_ARMCP_INFO_TIMEOUT 10000000 /* 10s */ + +#define DRAM_PHYS_DEFAULT_SIZE 0x100000000ull /* 4GB */ + +/* DRAM Memory Map */ + +#define CPU_FW_IMAGE_SIZE 0x10000000 /* 256MB */ +#define MMU_PAGE_TABLES_SIZE 0x0E000000 /* 224MB */ +#define MMU_CACHE_MNG_SIZE 0x00001000 /* 4KB */ +#define CPU_PQ_PKT_SIZE 0x00001000 /* 4KB */ +#define CPU_PQ_DATA_SIZE 0x01FFE000 /* 32MB - 8KB */ + +#define CPU_FW_IMAGE_ADDR DRAM_PHYS_BASE +#define MMU_PAGE_TABLES_ADDR (CPU_FW_IMAGE_ADDR + CPU_FW_IMAGE_SIZE) +#define MMU_CACHE_MNG_ADDR (MMU_PAGE_TABLES_ADDR + MMU_PAGE_TABLES_SIZE) +#define CPU_PQ_PKT_ADDR (MMU_CACHE_MNG_ADDR + MMU_CACHE_MNG_SIZE) +#define CPU_PQ_DATA_ADDR (CPU_PQ_PKT_ADDR + CPU_PQ_PKT_SIZE) +#define DRAM_BASE_ADDR_USER (CPU_PQ_DATA_ADDR + CPU_PQ_DATA_SIZE) + +#if (DRAM_BASE_ADDR_USER != 0x20000000) +#error "KMD must reserve 512MB" +#endif + +/* + * SRAM Memory Map for KMD + * + * KMD occupies KMD_SRAM_SIZE bytes from the start of SRAM. 
It is used for + * MME/TPC QMANs + * + */ + +#define MME_QMAN_BASE_OFFSET 0x000000 /* Must be 0 */ +#define MME_QMAN_LENGTH 64 +#define TPC_QMAN_LENGTH 64 + +#define TPC0_QMAN_BASE_OFFSET (MME_QMAN_BASE_OFFSET + \ + (MME_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC1_QMAN_BASE_OFFSET (TPC0_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC2_QMAN_BASE_OFFSET (TPC1_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC3_QMAN_BASE_OFFSET (TPC2_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC4_QMAN_BASE_OFFSET (TPC3_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC5_QMAN_BASE_OFFSET (TPC4_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC6_QMAN_BASE_OFFSET (TPC5_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) +#define TPC7_QMAN_BASE_OFFSET (TPC6_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#define SRAM_KMD_RES_OFFSET (TPC7_QMAN_BASE_OFFSET + \ + (TPC_QMAN_LENGTH * QMAN_PQ_ENTRY_SIZE)) + +#if (SRAM_KMD_RES_OFFSET >= GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START) +#error "MME/TPC QMANs SRAM space exceeds limit" +#endif + +#define SRAM_USER_BASE_OFFSET GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START + +/* Virtual address space */ +#define VA_HOST_SPACE_START 0x1000000000000ull /* 256TB */ +#define VA_HOST_SPACE_END 0x3FF8000000000ull /* 1PB - 1TB */ +#define VA_HOST_SPACE_SIZE (VA_HOST_SPACE_END - \ + VA_HOST_SPACE_START) /* 767TB */ + +#define VA_DDR_SPACE_START 0x800000000ull /* 32GB */ +#define VA_DDR_SPACE_END 0x2000000000ull /* 128GB */ +#define VA_DDR_SPACE_SIZE (VA_DDR_SPACE_END - \ + VA_DDR_SPACE_START) /* 128GB */ + +#define DMA_MAX_TRANSFER_SIZE 0xFFFFFFFF + +#define HW_CAP_PLL 0x00000001 +#define HW_CAP_DDR_0 0x00000002 +#define HW_CAP_DDR_1 0x00000004 +#define HW_CAP_MME 0x00000008 +#define HW_CAP_CPU 0x00000010 +#define HW_CAP_DMA 0x00000020 +#define HW_CAP_MSIX 0x00000040 +#define HW_CAP_CPU_Q 0x00000080 +#define HW_CAP_MMU 0x00000100 +#define HW_CAP_TPC_MBIST 0x00000200 +#define HW_CAP_GOLDEN 0x00000400 +#define HW_CAP_TPC 0x00000800 + +#define CPU_PKT_SHIFT 5 +#define CPU_PKT_SIZE (1 << CPU_PKT_SHIFT) +#define CPU_PKT_MASK (~((1 << CPU_PKT_SHIFT) - 1)) +#define CPU_MAX_PKTS_IN_CB 32 +#define CPU_CB_SIZE (CPU_PKT_SIZE * CPU_MAX_PKTS_IN_CB) +#define CPU_ACCESSIBLE_MEM_SIZE (HL_QUEUE_LENGTH * CPU_CB_SIZE) + +enum goya_fw_component { + FW_COMP_UBOOT, + FW_COMP_PREBOOT +}; + +struct goya_device { + /* TODO: remove hw_queues_lock after moving to scheduler code */ + spinlock_t hw_queues_lock; + u64 ddr_bar_cur_addr; + u32 hw_cap_initialized; +}; + +#endif /* GOYAP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index fa628b05db13..5076e680a73c 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -14,9 +14,62 @@ #define HL_NAME "habanalabs" +#define HL_MAX_QUEUES 128 + struct hl_device; +/** + * struct asic_fixed_properties - ASIC specific immutable properties. + * @sram_base_address: SRAM physical start address. + * @sram_end_address: SRAM physical end address. + * @sram_user_base_address - SRAM physical start address for user access. + * @dram_base_address: DRAM physical start address. + * @dram_end_address: DRAM physical end address. + * @dram_user_base_address: DRAM physical start address for user access. + * @dram_size: DRAM total size. + * @dram_pci_bar_size: size of PCI bar towards DRAM. 
+ * @host_phys_base_address: base physical address of host memory for + * transactions that the device generates. + * @va_space_host_start_address: base address of virtual memory range for + * mapping host memory. + * @va_space_host_end_address: end address of virtual memory range for + * mapping host memory. + * @va_space_dram_start_address: base address of virtual memory range for + * mapping DRAM memory. + * @va_space_dram_end_address: end address of virtual memory range for + * mapping DRAM memory. + * @cfg_size: configuration space size on SRAM. + * @sram_size: total size of SRAM. + * @max_asid: maximum number of open contexts (ASIDs). + * @completion_queues_count: number of completion queues. + * @high_pll: high PLL frequency used by the device. + * @tpc_enabled_mask: which TPCs are enabled. + */ +struct asic_fixed_properties { + u64 sram_base_address; + u64 sram_end_address; + u64 sram_user_base_address; + u64 dram_base_address; + u64 dram_end_address; + u64 dram_user_base_address; + u64 dram_size; + u64 dram_pci_bar_size; + u64 host_phys_base_address; + u64 va_space_host_start_address; + u64 va_space_host_end_address; + u64 va_space_dram_start_address; + u64 va_space_dram_end_address; + u32 cfg_size; + u32 sram_size; + u32 max_asid; + u32 high_pll; + u8 completion_queues_count; + u8 tpc_enabled_mask; +}; + + +#define HL_QUEUE_LENGTH 256 /* * ASICs */ @@ -33,6 +86,36 @@ enum hl_asic_type { ASIC_INVALID }; +/** + * struct hl_asic_funcs - ASIC specific functions that are can be called from + * common code. + * @early_init: sets up early driver state (pre sw_init), doesn't configure H/W. + * @early_fini: tears down what was done in early_init. + * @sw_init: sets up driver state, does not configure H/W. + * @sw_fini: tears down driver state, does not configure H/W. + * @suspend: handles IP specific H/W or SW changes for suspend. + * @resume: handles IP specific H/W or SW changes for resume. + * @dma_alloc_coherent: Allocate coherent DMA memory by calling + * dma_alloc_coherent(). This is ASIC function because its + * implementation is not trivial when the driver is loaded + * in simulation mode (not upstreamed). + * @dma_free_coherent: Free coherent DMA memory by calling dma_free_coherent(). + * This is ASIC function because its implementation is not + * trivial when the driver is loaded in simulation mode + * (not upstreamed). + */ +struct hl_asic_funcs { + int (*early_init)(struct hl_device *hdev); + int (*early_fini)(struct hl_device *hdev); + int (*sw_init)(struct hl_device *hdev); + int (*sw_fini)(struct hl_device *hdev); + int (*suspend)(struct hl_device *hdev); + int (*resume)(struct hl_device *hdev); + void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle, gfp_t flag); + void (*dma_free_coherent)(struct hl_device *hdev, size_t size, + void *cpu_addr, dma_addr_t dma_handle); +}; /* * FILE PRIVATE STRUCTURE @@ -62,26 +145,78 @@ struct hl_fpriv { */ #define HL_MAX_MINORS 256 +/* + * Registers read & write functions. 
+ */ + +u32 hl_rreg(struct hl_device *hdev, u32 reg); +void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); + +#define hl_poll_timeout(hdev, addr, val, cond, sleep_us, timeout_us) \ + readl_poll_timeout(hdev->rmmio + addr, val, cond, sleep_us, timeout_us) + +#define RREG32(reg) hl_rreg(hdev, (reg)) +#define WREG32(reg, v) hl_wreg(hdev, (reg), (v)) +#define DREG32(reg) pr_info("REGISTER: " #reg " : 0x%08X\n", \ + hl_rreg(hdev, (reg))) + +#define WREG32_P(reg, val, mask) \ + do { \ + u32 tmp_ = RREG32(reg); \ + tmp_ &= (mask); \ + tmp_ |= ((val) & ~(mask)); \ + WREG32(reg, tmp_); \ + } while (0) +#define WREG32_AND(reg, and) WREG32_P(reg, 0, and) +#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or)) + +#define REG_FIELD_SHIFT(reg, field) reg##_##field##_SHIFT +#define REG_FIELD_MASK(reg, field) reg##_##field##_MASK +#define WREG32_FIELD(reg, field, val) \ + WREG32(mm##reg, (RREG32(mm##reg) & ~REG_FIELD_MASK(reg, field)) | \ + (val) << REG_FIELD_SHIFT(reg, field)) + /** * struct hl_device - habanalabs device structure. * @pdev: pointer to PCI device, can be NULL in case of simulator device. + * @pcie_bar: array of available PCIe bars. + * @rmmio: configuration area address on SRAM. * @cdev: related char device. * @dev: realted kernel basic device structure. * @asic_name: ASIC specific nmae. * @asic_type: ASIC specific type. + * @dma_pool: DMA pool for small allocations. + * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address. + * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address. + * @cpu_accessible_dma_pool: KMD <-> ArmCP shared memory pool. + * @asic_prop: ASIC specific immutable properties. + * @asic_funcs: ASIC specific functions. + * @asic_specific: ASIC specific information to use only from ASIC files. * @major: habanalabs KMD major. * @id: device minor. * @disabled: is device disabled. */ struct hl_device { struct pci_dev *pdev; + void __iomem *pcie_bar[6]; + void __iomem *rmmio; struct cdev cdev; struct device *dev; char asic_name[16]; enum hl_asic_type asic_type; + struct dma_pool *dma_pool; + void *cpu_accessible_dma_mem; + dma_addr_t cpu_accessible_dma_address; + struct gen_pool *cpu_accessible_dma_pool; + struct asic_fixed_properties asic_prop; + const struct hl_asic_funcs *asic_funcs; + void *asic_specific; u32 major; u16 id; u8 disabled; + + /* Parameters for bring-up */ + u8 reset_pcilink; }; @@ -128,4 +263,6 @@ void hl_device_fini(struct hl_device *hdev); int hl_device_suspend(struct hl_device *hdev); int hl_device_resume(struct hl_device *hdev); +void goya_set_asic_funcs(struct hl_device *hdev); + #endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index a5251ed277d1..edf626b2b7b1 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -122,6 +122,9 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, hdev->major = hl_major; + /* Parameters for bring-up - set them to defaults */ + hdev->reset_pcilink = 0; + hdev->disabled = true; hdev->pdev = pdev; /* can be NULL in case of simulator device */ diff --git a/drivers/misc/habanalabs/include/goya/goya.h b/drivers/misc/habanalabs/include/goya/goya.h new file mode 100644 index 000000000000..614149efa412 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef GOYA_H +#define GOYA_H + +#include "asic_reg/goya_regs.h" + +#include + +#define SRAM_CFG_BAR_ID 0 +#define MSIX_BAR_ID 2 +#define DDR_BAR_ID 4 + +#define CFG_BAR_SIZE 0x10000000ull /* 256MB */ +#define MSIX_BAR_SIZE 0x1000ull /* 4KB */ + +#define CFG_BASE 0x7FFC000000ull +#define CFG_SIZE 0x4000000 /* 32MB CFG + 32MB DBG*/ + +#define SRAM_BASE_ADDR 0x7FF0000000ull +#define SRAM_SIZE 0x32A0000 /* 50.625MB */ + +#define DRAM_PHYS_BASE 0x0ull + +#define HOST_PHYS_BASE 0x8000000000ull /* 0.5TB */ +#define HOST_PHYS_SIZE 0x1000000000000ull /* 0.25PB (48 bits) */ + +#define GOYA_MSIX_ENTRIES 8 + +#define QMAN_PQ_ENTRY_SIZE 16 /* Bytes */ + +#define MAX_ASID 1024 + +#define PROT_BITS_OFFS 0xF80 + +#define DMA_MAX_NUM 5 + +#define TPC_MAX_NUM 8 + +#endif /* GOYA_H */ diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h new file mode 100644 index 000000000000..a0ec23adf8f5 --- /dev/null +++ b/include/uapi/misc/habanalabs.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef HABANALABS_H_ +#define HABANALABS_H_ + +#include +#include + +/* + * Defines that are asic-specific but constitutes as ABI between kernel driver + * and userspace + */ +#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ + +#endif /* HABANALABS_H_ */ -- cgit v1.2.3-71-gd317 From be5d926b5c10430671ae975b80efb7a5652e3f9a Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sat, 16 Feb 2019 00:39:15 +0200 Subject: habanalabs: add command buffer module This patch adds the command buffer (CB) module, which allows the user to create and destroy CBs and to map them to the user's process address-space. A command buffer is a memory blocks that reside in DMA-able address-space and is physically contiguous so it can be accessed by the device without MMU translation. The command buffer memory is allocated using the coherent DMA API. When creating a new CB, the IOCTL returns a handle of it, and the user-space process needs to use that handle to mmap the buffer to get a VA in the user's address-space. Before destroying (freeing) a CB, the user must unmap the CB's VA using the CB handle. Each CB has a reference counter, which tracks its usage in command submissions and also its mmaps (only a single mmap is allowed). The driver maintains a pool of pre-allocated CBs in order to reduce latency during command submissions. In case the pool is empty, the driver will go to the slow-path of allocating a new CB, i.e. calling dma_alloc_coherent. 
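A short sketch of the handle scheme described above may help (illustrative only; the helper names are invented, while HL_MMAP_CB_MASK, defined in the driver's main header, and the shifts mirror the code in the diff below). The 32-bit IDR id is tagged with a mask that sits above bit 31 and shifted left by PAGE_SHIFT, so the value handed to user space doubles as a page-aligned mmap offset; shifting back and truncating to 32 bits recovers the IDR id:

static u64 cb_id_to_user_handle(u32 id)
{
	u64 handle = id | HL_MMAP_CB_MASK;	/* tag as a CB mapping */

	return handle << PAGE_SHIFT;		/* page-aligned mmap offset */
}

static u32 user_handle_to_cb_id(u64 handle)
{
	/* The cast drops the mask bits, which live above bit 31. */
	return (u32)(handle >> PAGE_SHIFT);
}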
Reviewed-by: Mike Rapoport Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/Makefile | 3 +- drivers/misc/habanalabs/command_buffer.c | 433 +++++++++++++++++++++++++++++ drivers/misc/habanalabs/device.c | 43 ++- drivers/misc/habanalabs/goya/goya.c | 28 ++ drivers/misc/habanalabs/habanalabs.h | 86 ++++++ drivers/misc/habanalabs/habanalabs_drv.c | 2 + drivers/misc/habanalabs/habanalabs_ioctl.c | 99 +++++++ include/uapi/misc/habanalabs.h | 46 +++ 8 files changed, 738 insertions(+), 2 deletions(-) create mode 100644 drivers/misc/habanalabs/command_buffer.c create mode 100644 drivers/misc/habanalabs/habanalabs_ioctl.c (limited to 'include') diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile index 3ffbadc2ca01..2530c9b78ca4 100644 --- a/drivers/misc/habanalabs/Makefile +++ b/drivers/misc/habanalabs/Makefile @@ -4,7 +4,8 @@ obj-m := habanalabs.o -habanalabs-y := habanalabs_drv.o device.o context.o asid.o +habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \ + command_buffer.o include $(src)/goya/Makefile habanalabs-y += $(HL_GOYA_FILES) diff --git a/drivers/misc/habanalabs/command_buffer.c b/drivers/misc/habanalabs/command_buffer.c new file mode 100644 index 000000000000..e659ca3035e4 --- /dev/null +++ b/drivers/misc/habanalabs/command_buffer.c @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" + +#include +#include + +static void cb_fini(struct hl_device *hdev, struct hl_cb *cb) +{ + hdev->asic_funcs->dma_free_coherent(hdev, cb->size, + (void *) (uintptr_t) cb->kernel_address, + cb->bus_address); + kfree(cb); +} + +static void cb_do_release(struct hl_device *hdev, struct hl_cb *cb) +{ + if (cb->is_pool) { + spin_lock(&hdev->cb_pool_lock); + list_add(&cb->pool_list, &hdev->cb_pool); + spin_unlock(&hdev->cb_pool_lock); + } else { + cb_fini(hdev, cb); + } +} + +static void cb_release(struct kref *ref) +{ + struct hl_device *hdev; + struct hl_cb *cb; + + cb = container_of(ref, struct hl_cb, refcount); + hdev = cb->hdev; + + cb_do_release(hdev, cb); +} + +static struct hl_cb *hl_cb_alloc(struct hl_device *hdev, u32 cb_size, + int ctx_id) +{ + struct hl_cb *cb; + void *p; + + /* + * We use of GFP_ATOMIC here because this function can be called from + * the latency-sensitive code path for command submission. Due to H/W + * limitations in some of the ASICs, the kernel must copy the user CB + * that is designated for an external queue and actually enqueue + * the kernel's copy. Hence, we must never sleep in this code section + * and must use GFP_ATOMIC for all memory allocations. 
+ */ + if (ctx_id == HL_KERNEL_ASID_ID) + cb = kzalloc(sizeof(*cb), GFP_ATOMIC); + else + cb = kzalloc(sizeof(*cb), GFP_KERNEL); + + if (!cb) + return NULL; + + if (ctx_id == HL_KERNEL_ASID_ID) + p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, + &cb->bus_address, GFP_ATOMIC); + else + p = hdev->asic_funcs->dma_alloc_coherent(hdev, cb_size, + &cb->bus_address, + GFP_USER | __GFP_ZERO); + if (!p) { + dev_err(hdev->dev, + "failed to allocate %d of dma memory for CB\n", + cb_size); + kfree(cb); + return NULL; + } + + cb->kernel_address = (u64) (uintptr_t) p; + cb->size = cb_size; + + return cb; +} + +int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 cb_size, u64 *handle, int ctx_id) +{ + struct hl_cb *cb; + bool alloc_new_cb = true; + int rc; + + if (hdev->disabled) { + dev_warn_ratelimited(hdev->dev, + "Device is disabled. Can't create new CBs\n"); + rc = -EBUSY; + goto out_err; + } + + if (cb_size > HL_MAX_CB_SIZE) { + dev_err(hdev->dev, + "CB size %d must be less then %d\n", + cb_size, HL_MAX_CB_SIZE); + rc = -EINVAL; + goto out_err; + } + + /* Minimum allocation must be PAGE SIZE */ + if (cb_size < PAGE_SIZE) + cb_size = PAGE_SIZE; + + if (ctx_id == HL_KERNEL_ASID_ID && + cb_size <= hdev->asic_prop.cb_pool_cb_size) { + + spin_lock(&hdev->cb_pool_lock); + if (!list_empty(&hdev->cb_pool)) { + cb = list_first_entry(&hdev->cb_pool, typeof(*cb), + pool_list); + list_del(&cb->pool_list); + spin_unlock(&hdev->cb_pool_lock); + alloc_new_cb = false; + } else { + spin_unlock(&hdev->cb_pool_lock); + dev_dbg(hdev->dev, "CB pool is empty\n"); + } + } + + if (alloc_new_cb) { + cb = hl_cb_alloc(hdev, cb_size, ctx_id); + if (!cb) { + rc = -ENOMEM; + goto out_err; + } + } + + cb->hdev = hdev; + cb->ctx_id = ctx_id; + + spin_lock(&mgr->cb_lock); + rc = idr_alloc(&mgr->cb_handles, cb, 1, 0, GFP_ATOMIC); + spin_unlock(&mgr->cb_lock); + + if (rc < 0) { + dev_err(hdev->dev, "Failed to allocate IDR for a new CB\n"); + goto release_cb; + } + + cb->id = rc; + + kref_init(&cb->refcount); + spin_lock_init(&cb->lock); + + /* + * idr is 32-bit so we can safely OR it with a mask that is above + * 32 bit + */ + *handle = cb->id | HL_MMAP_CB_MASK; + *handle <<= PAGE_SHIFT; + + return 0; + +release_cb: + cb_do_release(hdev, cb); +out_err: + *handle = 0; + + return rc; +} + +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle) +{ + struct hl_cb *cb; + u32 handle; + int rc = 0; + + /* + * handle was given to user to do mmap, I need to shift it back to + * how the idr module gave it to me + */ + cb_handle >>= PAGE_SHIFT; + handle = (u32) cb_handle; + + spin_lock(&mgr->cb_lock); + + cb = idr_find(&mgr->cb_handles, handle); + if (cb) { + idr_remove(&mgr->cb_handles, handle); + spin_unlock(&mgr->cb_lock); + kref_put(&cb->refcount, cb_release); + } else { + spin_unlock(&mgr->cb_lock); + dev_err(hdev->dev, + "CB destroy failed, no match to handle 0x%x\n", handle); + rc = -EINVAL; + } + + return rc; +} + +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_cb_args *args = data; + struct hl_device *hdev = hpriv->hdev; + u64 handle; + int rc; + + switch (args->in.op) { + case HL_CB_OP_CREATE: + rc = hl_cb_create(hdev, &hpriv->cb_mgr, args->in.cb_size, + &handle, hpriv->ctx->asid); + memset(args, 0, sizeof(*args)); + args->out.cb_handle = handle; + break; + case HL_CB_OP_DESTROY: + rc = hl_cb_destroy(hdev, &hpriv->cb_mgr, + args->in.cb_handle); + break; + default: + rc = -ENOTTY; + break; + } + + return rc; +} + +static void cb_vm_close(struct vm_area_struct *vma) 
+{ + struct hl_cb *cb = (struct hl_cb *) vma->vm_private_data; + + cb->mmap_size -= vma->vm_end - vma->vm_start; + + if (cb->mmap_size) + return; + + spin_lock(&cb->lock); + cb->mmap = false; + spin_unlock(&cb->lock); + + hl_cb_put(cb); + vma->vm_private_data = NULL; +} + +static const struct vm_operations_struct cb_vm_ops = { + .close = cb_vm_close +}; + +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cb *cb; + phys_addr_t address; + u32 handle; + int rc; + + handle = vma->vm_pgoff; + + /* reference was taken here */ + cb = hl_cb_get(hdev, &hpriv->cb_mgr, handle); + if (!cb) { + dev_err(hdev->dev, + "CB mmap failed, no match to handle %d\n", handle); + return -EINVAL; + } + + /* Validation check */ + if ((vma->vm_end - vma->vm_start) != cb->size) { + dev_err(hdev->dev, + "CB mmap failed, mmap size 0x%lx != 0x%x cb size\n", + vma->vm_end - vma->vm_start, cb->size); + rc = -EINVAL; + goto put_cb; + } + + spin_lock(&cb->lock); + + if (cb->mmap) { + dev_err(hdev->dev, + "CB mmap failed, CB already mmaped to user\n"); + rc = -EINVAL; + goto release_lock; + } + + cb->mmap = true; + + spin_unlock(&cb->lock); + + vma->vm_ops = &cb_vm_ops; + + /* + * Note: We're transferring the cb reference to + * vma->vm_private_data here. + */ + + vma->vm_private_data = cb; + + /* Calculate address for CB */ + address = virt_to_phys((void *) (uintptr_t) cb->kernel_address); + + rc = hdev->asic_funcs->cb_mmap(hdev, vma, cb->kernel_address, + address, cb->size); + + if (rc) { + spin_lock(&cb->lock); + cb->mmap = false; + goto release_lock; + } + + cb->mmap_size = cb->size; + + return 0; + +release_lock: + spin_unlock(&cb->lock); +put_cb: + hl_cb_put(cb); + return rc; +} + +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle) +{ + struct hl_cb *cb; + + spin_lock(&mgr->cb_lock); + cb = idr_find(&mgr->cb_handles, handle); + + if (!cb) { + spin_unlock(&mgr->cb_lock); + dev_warn(hdev->dev, + "CB get failed, no match to handle %d\n", handle); + return NULL; + } + + kref_get(&cb->refcount); + + spin_unlock(&mgr->cb_lock); + + return cb; + +} + +void hl_cb_put(struct hl_cb *cb) +{ + kref_put(&cb->refcount, cb_release); +} + +void hl_cb_mgr_init(struct hl_cb_mgr *mgr) +{ + spin_lock_init(&mgr->cb_lock); + idr_init(&mgr->cb_handles); +} + +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr) +{ + struct hl_cb *cb; + struct idr *idp; + u32 id; + + idp = &mgr->cb_handles; + + idr_for_each_entry(idp, cb, id) { + if (kref_put(&cb->refcount, cb_release) != 1) + dev_err(hdev->dev, + "CB %d for CTX ID %d is still alive\n", + id, cb->ctx_id); + } + + idr_destroy(&mgr->cb_handles); +} + +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size) +{ + u64 cb_handle; + struct hl_cb *cb; + int rc; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, cb_size, &cb_handle, + HL_KERNEL_ASID_ID); + if (rc) { + dev_err(hdev->dev, "Failed to allocate CB for KMD %d\n", rc); + return NULL; + } + + cb_handle >>= PAGE_SHIFT; + cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, (u32) cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!cb, "Kernel CB handle invalid 0x%x\n", (u32) cb_handle); + if (!cb) + goto destroy_cb; + + return cb; + +destroy_cb: + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb_handle << PAGE_SHIFT); + + return NULL; +} + +int hl_cb_pool_init(struct hl_device *hdev) +{ + struct hl_cb *cb; + int i; + + INIT_LIST_HEAD(&hdev->cb_pool); + spin_lock_init(&hdev->cb_pool_lock); + + for (i 
= 0 ; i < hdev->asic_prop.cb_pool_cb_cnt ; i++) { + cb = hl_cb_alloc(hdev, hdev->asic_prop.cb_pool_cb_size, + HL_KERNEL_ASID_ID); + if (cb) { + cb->is_pool = true; + list_add(&cb->pool_list, &hdev->cb_pool); + } else { + hl_cb_pool_fini(hdev); + return -ENOMEM; + } + } + + return 0; +} + +int hl_cb_pool_fini(struct hl_device *hdev) +{ + struct hl_cb *cb, *tmp; + + list_for_each_entry_safe(cb, tmp, &hdev->cb_pool, pool_list) { + list_del(&cb->pool_list); + cb_fini(hdev, cb); + } + + return 0; +} diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 2423588ecf22..77f0018e7aa9 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -52,6 +52,7 @@ static int hl_device_release(struct inode *inode, struct file *filp) { struct hl_fpriv *hpriv = filp->private_data; + hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); filp->private_data = NULL; @@ -61,10 +62,34 @@ static int hl_device_release(struct inode *inode, struct file *filp) return 0; } +/* + * hl_mmap - mmap function for habanalabs device + * + * @*filp: pointer to file structure + * @*vma: pointer to vm_area_struct of the process + * + * Called when process does an mmap on habanalabs device. Call the device's mmap + * function at the end of the common code. + */ +static int hl_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct hl_fpriv *hpriv = filp->private_data; + + if ((vma->vm_pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK) { + vma->vm_pgoff ^= HL_MMAP_CB_MASK; + return hl_cb_mmap(hpriv, vma); + } + + return hpriv->hdev->asic_funcs->mmap(hpriv, vma); +} + static const struct file_operations hl_ops = { .owner = THIS_MODULE, .open = hl_device_open, - .release = hl_device_release + .release = hl_device_release, + .mmap = hl_mmap, + .unlocked_ioctl = hl_ioctl, + .compat_ioctl = hl_ioctl }; /* @@ -149,6 +174,8 @@ static int device_early_init(struct hl_device *hdev) if (rc) goto early_fini; + hl_cb_mgr_init(&hdev->kernel_cb_mgr); + mutex_init(&hdev->fd_open_cnt_lock); atomic_set(&hdev->fd_open_cnt, 0); @@ -170,6 +197,8 @@ early_fini: static void device_early_fini(struct hl_device *hdev) { + hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + hl_asid_fini(hdev); if (hdev->asic_funcs->early_fini) @@ -284,11 +313,21 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) goto free_ctx; } + rc = hl_cb_pool_init(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CB pool\n"); + goto release_ctx; + } + dev_notice(hdev->dev, "Successfully added device to habanalabs driver\n"); return 0; +release_ctx: + if (hl_ctx_put(hdev->kernel_ctx) != 1) + dev_err(hdev->dev, + "kernel ctx is still alive on initialization failure\n"); free_ctx: kfree(hdev->kernel_ctx); sw_fini: @@ -325,6 +364,8 @@ void hl_device_fini(struct hl_device *hdev) /* Mark device as disabled */ hdev->disabled = true; + hl_cb_pool_fini(hdev); + /* Release kernel context */ if ((hdev->kernel_ctx) && (hl_ctx_put(hdev->kernel_ctx) != 1)) dev_err(hdev->dev, "kernel ctx is still alive\n"); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index ceaffa9afd83..88b047f7d85d 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -82,6 +82,9 @@ #define GOYA_MAX_INITIATORS 20 +#define GOYA_CB_POOL_CB_CNT 512 +#define GOYA_CB_POOL_CB_SIZE 0x20000 /* 128KB */ + static void goya_get_fixed_properties(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -109,6 +112,8 @@ 
static void goya_get_fixed_properties(struct hl_device *hdev) prop->tpc_enabled_mask = TPC_ENABLED_MASK; prop->high_pll = PLL_HIGH_DEFAULT; + prop->cb_pool_cb_cnt = GOYA_CB_POOL_CB_CNT; + prop->cb_pool_cb_size = GOYA_CB_POOL_CB_SIZE; } /* @@ -597,6 +602,27 @@ int goya_resume(struct hl_device *hdev) return 0; } +int goya_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) +{ + return -EINVAL; +} + +int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, + u64 kaddress, phys_addr_t paddress, u32 size) +{ + int rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | + VM_DONTCOPY | VM_NORESERVE; + + rc = remap_pfn_range(vma, vma->vm_start, paddress >> PAGE_SHIFT, + size, vma->vm_page_prot); + if (rc) + dev_err(hdev->dev, "remap_pfn_range error %d", rc); + + return rc; +} + void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { @@ -616,6 +642,8 @@ static const struct hl_asic_funcs goya_funcs = { .sw_fini = goya_sw_fini, .suspend = goya_suspend, .resume = goya_resume, + .mmap = goya_mmap, + .cb_mmap = goya_cb_mmap, .dma_alloc_coherent = goya_dma_alloc_coherent, .dma_free_coherent = goya_dma_free_coherent, }; diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index ca8171ca3a04..63741c7224b6 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -14,9 +14,12 @@ #define HL_NAME "habanalabs" +#define HL_MMAP_CB_MASK (0x8000000000000000ull >> PAGE_SHIFT) + #define HL_MAX_QUEUES 128 struct hl_device; +struct hl_fpriv; /** @@ -44,6 +47,8 @@ struct hl_device; * @max_asid: maximum number of open contexts (ASIDs). * @completion_queues_count: number of completion queues. * @high_pll: high PLL frequency used by the device. + * @cb_pool_cb_cnt: number of CBs in the CB pool. + * @cb_pool_cb_size: size of each CB in the CB pool. * @tpc_enabled_mask: which TPCs are enabled. */ struct asic_fixed_properties { @@ -64,11 +69,60 @@ struct asic_fixed_properties { u32 sram_size; u32 max_asid; u32 high_pll; + u32 cb_pool_cb_cnt; + u32 cb_pool_cb_size; u8 completion_queues_count; u8 tpc_enabled_mask; }; +/* + * Command Buffers + */ + +#define HL_MAX_CB_SIZE 0x200000 /* 2MB */ + +/** + * struct hl_cb_mgr - describes a Command Buffer Manager. + * @cb_lock: protects cb_handles. + * @cb_handles: an idr to hold all command buffer handles. + */ +struct hl_cb_mgr { + spinlock_t cb_lock; + struct idr cb_handles; /* protected by cb_lock */ +}; + +/** + * struct hl_cb - describes a Command Buffer. + * @refcount: reference counter for usage of the CB. + * @hdev: pointer to device this CB belongs to. + * @lock: spinlock to protect mmap/cs flows. + * @pool_list: node in pool list of command buffers. + * @kernel_address: Holds the CB's kernel virtual address. + * @bus_address: Holds the CB's DMA address. + * @mmap_size: Holds the CB's size that was mmaped. + * @size: holds the CB's size. + * @id: the CB's ID. + * @ctx_id: holds the ID of the owner's context. + * @mmap: true if the CB is currently mmaped to user. + * @is_pool: true if CB was acquired from the pool, false otherwise. + */ +struct hl_cb { + struct kref refcount; + struct hl_device *hdev; + spinlock_t lock; + struct list_head pool_list; + u64 kernel_address; + dma_addr_t bus_address; + u32 mmap_size; + u32 size; + u32 id; + u32 ctx_id; + u8 mmap; + u8 is_pool; +}; + + #define HL_QUEUE_LENGTH 256 @@ -97,6 +151,8 @@ enum hl_asic_type { * @sw_fini: tears down driver state, does not configure H/W. 
 * @suspend: handles IP specific H/W or SW changes for suspend.
 * @resume: handles IP specific H/W or SW changes for resume.
+ * @mmap: mmap function, does nothing.
+ * @cb_mmap: maps a CB.
 * @dma_alloc_coherent: Allocate coherent DMA memory by calling
 *                      dma_alloc_coherent(). This is ASIC function because its
 *                      implementation is not trivial when the driver is loaded
@@ -113,6 +169,9 @@ struct hl_asic_funcs {
 	int (*sw_fini)(struct hl_device *hdev);
 	int (*suspend)(struct hl_device *hdev);
 	int (*resume)(struct hl_device *hdev);
+	int (*mmap)(struct hl_fpriv *hpriv, struct vm_area_struct *vma);
+	int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma,
+			u64 kaddress, phys_addr_t paddress, u32 size);
 	void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size,
 					dma_addr_t *dma_handle, gfp_t flag);
 	void (*dma_free_coherent)(struct hl_device *hdev, size_t size,
@@ -163,6 +222,7 @@ struct hl_ctx_mgr {
 * @taskpid: current process ID.
 * @ctx: current executing context.
 * @ctx_mgr: context manager to handle multiple context for this FD.
+ * @cb_mgr: command buffer manager to handle multiple buffers for this FD.
 * @refcount: number of related contexts.
 */
struct hl_fpriv {
@@ -171,6 +231,7 @@ struct hl_fpriv {
 	struct pid *taskpid;
 	struct hl_ctx *ctx; /* TODO: remove for multiple ctx */
 	struct hl_ctx_mgr ctx_mgr;
+	struct hl_cb_mgr cb_mgr;
 	struct kref refcount;
 };
@@ -225,6 +286,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
 * @asic_name: ASIC specific name.
 * @asic_type: ASIC specific type.
 * @kernel_ctx: KMD context structure.
+ * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs.
 * @dma_pool: DMA pool for small allocations.
 * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address.
 * @cpu_accessible_dma_address: KMD <-> ArmCP shared memory DMA address.
@@ -240,6 +302,8 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
 * @asic_prop: ASIC specific immutable properties.
 * @asic_funcs: ASIC specific functions.
 * @asic_specific: ASIC specific information to use only from ASIC files.
+ * @cb_pool: list of preallocated CBs.
+ * @cb_pool_lock: protects the CB pool.
 * @user_ctx: current user context executing.
 * @fd_open_cnt: number of open user processes.
 * @major: habanalabs KMD major.
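The HL_MMAP_CB_MASK scheme documented above is easy to check in isolation: the idr handle is ORed with bit 51 (bit 63 pre-shift) and shifted up by PAGE_SHIFT, so once mmap() divides the offset by the page size the mask bit lands exactly where hl_mmap() tests vm_pgoff. A standalone round-trip sketch (userspace C, assuming 4 KiB pages; PAGE_SHIFT here is a local stand-in for the kernel's):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Local stand-ins matching the kernel definitions for 4 KiB pages */
	#define PAGE_SHIFT	12
	#define HL_MMAP_CB_MASK	(0x8000000000000000ull >> PAGE_SHIFT)

	int main(void)
	{
		uint32_t id = 42;	/* idr handle, always >= 1 */

		/* Encoding done by hl_cb_create() */
		uint64_t handle = ((uint64_t) id | HL_MMAP_CB_MASK) << PAGE_SHIFT;

		/* Decoding done by hl_mmap(): the mmap offset arrives as
		 * vm_pgoff, i.e. already divided by PAGE_SIZE, so the mask
		 * bit is directly visible there.
		 */
		uint64_t pgoff = handle >> PAGE_SHIFT;
		assert((pgoff & HL_MMAP_CB_MASK) == HL_MMAP_CB_MASK);
		pgoff ^= HL_MMAP_CB_MASK;	/* back to the idr handle */
		assert((uint32_t) pgoff == id);

		printf("handle 0x%llx -> id %u\n",
			(unsigned long long) handle, (uint32_t) pgoff);
		return 0;
	}
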
@@ -255,6 +319,7 @@ struct hl_device { char asic_name[16]; enum hl_asic_type asic_type; struct hl_ctx *kernel_ctx; + struct hl_cb_mgr kernel_cb_mgr; struct dma_pool *dma_pool; void *cpu_accessible_dma_mem; dma_addr_t cpu_accessible_dma_address; @@ -266,6 +331,10 @@ struct hl_device { struct asic_fixed_properties asic_prop; const struct hl_asic_funcs *asic_funcs; void *asic_specific; + + struct list_head cb_pool; + spinlock_t cb_pool_lock; + /* TODO: remove user_ctx for multiple process support */ struct hl_ctx *user_ctx; atomic_t fd_open_cnt; @@ -334,6 +403,23 @@ int hl_device_resume(struct hl_device *hdev); void hl_hpriv_get(struct hl_fpriv *hpriv); void hl_hpriv_put(struct hl_fpriv *hpriv); +int hl_cb_create(struct hl_device *hdev, struct hl_cb_mgr *mgr, u32 cb_size, + u64 *handle, int ctx_id); +int hl_cb_destroy(struct hl_device *hdev, struct hl_cb_mgr *mgr, u64 cb_handle); +int hl_cb_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma); +struct hl_cb *hl_cb_get(struct hl_device *hdev, struct hl_cb_mgr *mgr, + u32 handle); +void hl_cb_put(struct hl_cb *cb); +void hl_cb_mgr_init(struct hl_cb_mgr *mgr); +void hl_cb_mgr_fini(struct hl_device *hdev, struct hl_cb_mgr *mgr); +struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size); +int hl_cb_pool_init(struct hl_device *hdev); +int hl_cb_pool_fini(struct hl_device *hdev); + void goya_set_asic_funcs(struct hl_device *hdev); +/* IOCTLs */ +long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); +int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); + #endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 6fddd801aca3..8628d1d8f037 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -116,6 +116,7 @@ int hl_device_open(struct inode *inode, struct file *filp) kref_init(&hpriv->refcount); nonseekable_open(inode, filp); + hl_cb_mgr_init(&hpriv->cb_mgr); hl_ctx_mgr_init(&hpriv->ctx_mgr); rc = hl_ctx_create(hdev, hpriv); @@ -131,6 +132,7 @@ int hl_device_open(struct inode *inode, struct file *filp) out_err: filp->private_data = NULL; hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); + hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); kfree(hpriv); close_device: diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c new file mode 100644 index 000000000000..e53265fe9543 --- /dev/null +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */
+
+#include <uapi/misc/habanalabs.h>
+#include "habanalabs.h"
+
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+
+#define HL_IOCTL_DEF(ioctl, _func) \
+	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}
+
+static const struct hl_ioctl_desc hl_ioctls[] = {
+	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl)
+};
+
+#define HL_CORE_IOCTL_COUNT	ARRAY_SIZE(hl_ioctls)
+
+long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
+{
+	struct hl_fpriv *hpriv = filep->private_data;
+	struct hl_device *hdev = hpriv->hdev;
+	hl_ioctl_t *func;
+	const struct hl_ioctl_desc *ioctl = NULL;
+	unsigned int nr = _IOC_NR(cmd);
+	char stack_kdata[128] = {0};
+	char *kdata = NULL;
+	unsigned int usize, asize;
+	int retcode;
+
+	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
+		u32 hl_size;
+
+		ioctl = &hl_ioctls[nr];
+
+		hl_size = _IOC_SIZE(ioctl->cmd);
+		usize = asize = _IOC_SIZE(cmd);
+		if (hl_size > asize)
+			asize = hl_size;
+
+		cmd = ioctl->cmd;
+	} else {
+		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
+			task_pid_nr(current), nr);
+		return -ENOTTY;
+	}
+
+	/* Do not trust userspace, use our own definition */
+	func = ioctl->func;
+
+	if (unlikely(!func)) {
+		dev_dbg(hdev->dev, "no function\n");
+		retcode = -ENOTTY;
+		goto out_err;
+	}
+
+	if (cmd & (IOC_IN | IOC_OUT)) {
+		if (asize <= sizeof(stack_kdata)) {
+			kdata = stack_kdata;
+		} else {
+			kdata = kzalloc(asize, GFP_KERNEL);
+			if (!kdata) {
+				retcode = -ENOMEM;
+				goto out_err;
+			}
+		}
+	}
+
+	if (cmd & IOC_IN) {
+		if (copy_from_user(kdata, (void __user *)arg, usize)) {
+			retcode = -EFAULT;
+			goto out_err;
+		}
+	} else if (cmd & IOC_OUT) {
+		memset(kdata, 0, usize);
+	}
+
+	retcode = func(hpriv, kdata);
+
+	if (cmd & IOC_OUT)
+		if (copy_to_user((void __user *)arg, kdata, usize))
+			retcode = -EFAULT;
+
+out_err:
+	if (retcode)
+		dev_dbg(hdev->dev,
+			"error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+			task_pid_nr(current), cmd, nr);
+
+	if (kdata != stack_kdata)
+		kfree(kdata);
+
+	return retcode;
+}
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index a0ec23adf8f5..a8edfd3e9c95 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -17,4 +17,50 @@
 */
 #define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START	0x8000	/* 32KB */
 
+/* Opcode to create a new command buffer */
+#define HL_CB_OP_CREATE		0
+/* Opcode to destroy previously created command buffer */
+#define HL_CB_OP_DESTROY	1
+
+struct hl_cb_in {
+	/* Handle of CB or 0 if we want to create one */
+	__u64 cb_handle;
+	/* HL_CB_OP_* */
+	__u32 op;
+	/* Size of CB. Minimum requested size must be PAGE_SIZE */
+	__u32 cb_size;
+	/* Context ID - Currently not in use */
+	__u32 ctx_id;
+	__u32 pad;
+};
+
+struct hl_cb_out {
+	/* Handle of CB */
+	__u64 cb_handle;
+};
+
+union hl_cb_args {
+	struct hl_cb_in in;
+	struct hl_cb_out out;
+};
+
+/*
+ * Command Buffer
+ * - Request a Command Buffer
+ * - Destroy a Command Buffer
+ *
+ * The command buffers are memory blocks that reside in DMA-able address
+ * space and are physically contiguous so they can be accessed by the device
+ * directly. They are allocated using the coherent DMA API.
+ *
+ * When creating a new CB, the IOCTL returns a handle to it, and the
+ * user-space process needs to use that handle to mmap the buffer so it
+ * can access it.
+ *
+ */
+#define HL_IOCTL_CB \
+		_IOWR('H', 0x02, union hl_cb_args)
+
+#define HL_COMMAND_START	0x02
+#define HL_COMMAND_END		0x03
+
 #endif /* HABANALABS_H_ */
-- 
cgit v1.2.3-71-gd317
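Taken together with hl_mmap() from device.c, the intended user-space flow for this uapi is: create a CB, use the returned handle as the mmap offset, then destroy it. A minimal sketch (the /dev/hl0 node name and the <misc/habanalabs.h> install path are illustrative assumptions; the ioctl, opcodes and structures are the ones defined above):

	#include <fcntl.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <unistd.h>
	#include <misc/habanalabs.h>	/* installed uapi header, path assumed */

	int main(void)
	{
		union hl_cb_args args;
		__u64 handle;
		void *cb;
		int fd;

		fd = open("/dev/hl0", O_RDWR);	/* device node name assumed */
		if (fd < 0)
			return 1;

		memset(&args, 0, sizeof(args));
		args.in.op = HL_CB_OP_CREATE;
		args.in.cb_size = 4096;	/* rounded up to PAGE_SIZE anyway */
		if (ioctl(fd, HL_IOCTL_CB, &args))
			return 1;
		handle = args.out.cb_handle;	/* in/out share storage: save it */

		/* The handle doubles as the mmap offset; hl_mmap() routes it
		 * to hl_cb_mmap() via the HL_MMAP_CB_MASK bit in vm_pgoff.
		 */
		cb = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
				fd, handle);
		if (cb == MAP_FAILED)
			return 1;

		/* ... fill the CB with packets ... */

		munmap(cb, 4096);
		memset(&args, 0, sizeof(args));
		args.in.op = HL_CB_OP_DESTROY;
		args.in.cb_handle = handle;
		ioctl(fd, HL_IOCTL_CB, &args);
		close(fd);
		return 0;
	}
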
From 9494a8dd8d22cbff8ce358aaa223fffe1b070cb0 Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Sat, 16 Feb 2019 00:39:17 +0200
Subject: habanalabs: add h/w queues module

This patch adds the H/W queues module and the code to initialize Goya's
various compute and DMA engines and their queues.

Goya has 5 DMA channels, 8 TPC engines and a single MME engine. For each
channel/engine, there is a H/W queue logic which is used to pass commands
from the user to the H/W. That logic is called QMAN.

There are two types of QMANs: external and internal. The DMA QMANs are
considered external while the TPC and MME QMANs are considered internal.
For each external queue there is a completion queue, which is located on
the Host memory.

The differences between external and internal QMANs are:

1. The location of the queue's memory. External QMANs are located on the
   Host memory while internal QMANs are located on the on-chip memory.

2. The external QMAN writes an entry to a completion queue and sends an
   MSI-X interrupt upon completion of a command buffer that was given to
   it. The internal QMAN doesn't do that.

Reviewed-by: Mike Rapoport
Signed-off-by: Oded Gabbay
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/habanalabs/Makefile                   |    2 +-
 drivers/misc/habanalabs/device.c                   |   75 +-
 drivers/misc/habanalabs/goya/goya.c                | 1527 ++++++++++++++++++--
 drivers/misc/habanalabs/goya/goyaP.h               |    7 +
 drivers/misc/habanalabs/habanalabs.h               |  174 ++-
 drivers/misc/habanalabs/habanalabs_drv.c           |    5 +
 drivers/misc/habanalabs/hw_queue.c                 |  400 +++++
 drivers/misc/habanalabs/include/armcp_if.h         |  292 ++++
 .../habanalabs/include/goya/goya_async_events.h    |  186 +++
 .../misc/habanalabs/include/goya/goya_packets.h    |  129 ++
 drivers/misc/habanalabs/include/qman_if.h          |   56 +
 drivers/misc/habanalabs/irq.c                      |  149 ++
 include/uapi/misc/habanalabs.h                     |   29 +
 13 files changed, 2915 insertions(+), 116 deletions(-)
 create mode 100644 drivers/misc/habanalabs/hw_queue.c
 create mode 100644 drivers/misc/habanalabs/include/goya/goya_async_events.h
 create mode 100644 drivers/misc/habanalabs/include/goya/goya_packets.h
 create mode 100644 drivers/misc/habanalabs/include/qman_if.h
 create mode 100644 drivers/misc/habanalabs/irq.c
(limited to 'include')

diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile
index 2530c9b78ca4..c07f3ccb57dc 100644
--- a/drivers/misc/habanalabs/Makefile
+++ b/drivers/misc/habanalabs/Makefile
@@ -5,7 +5,7 @@ obj-m := habanalabs.o
 
 habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \
-		command_buffer.o
+		command_buffer.o hw_queue.o irq.o
 
 include $(src)/goya/Makefile
 habanalabs-y += $(HL_GOYA_FILES)
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index 5eefc2952be5..06e2b7f32499 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -174,13 +174,23 @@ static int device_early_init(struct hl_device *hdev)
 	if (rc)
 		goto early_fini;
 
+	hdev->cq_wq = alloc_workqueue("hl-free-jobs", WQ_UNBOUND, 0);
+	if (hdev->cq_wq == NULL) {
+		dev_err(hdev->dev, "Failed to allocate CQ workqueue\n");
+		rc = -ENOMEM;
+		goto asid_fini;
+	}
+
 	hl_cb_mgr_init(&hdev->kernel_cb_mgr);
 
 	mutex_init(&hdev->fd_open_cnt_lock);
+	mutex_init(&hdev->send_cpu_message_lock);
 	atomic_set(&hdev->fd_open_cnt, 0);
 
 	return 0;
 
+asid_fini:
+	hl_asid_fini(hdev);
 early_fini:
	if 
(hdev->asic_funcs->early_fini) hdev->asic_funcs->early_fini(hdev); @@ -196,9 +206,12 @@ early_fini: */ static void device_early_fini(struct hl_device *hdev) { + mutex_destroy(&hdev->send_cpu_message_lock); hl_cb_mgr_fini(hdev, &hdev->kernel_cb_mgr); + destroy_workqueue(hdev->cq_wq); + hl_asid_fini(hdev); if (hdev->asic_funcs->early_fini) @@ -277,7 +290,7 @@ int hl_device_resume(struct hl_device *hdev) */ int hl_device_init(struct hl_device *hdev, struct class *hclass) { - int rc; + int i, rc, cq_ready_cnt; /* Create device */ rc = device_setup_cdev(hdev, hclass, hdev->id, &hl_ops); @@ -298,11 +311,48 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) if (rc) goto early_fini; + /* + * Initialize the H/W queues. Must be done before hw_init, because + * there the addresses of the kernel queue are being written to the + * registers of the device + */ + rc = hl_hw_queues_create(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize kernel queues\n"); + goto sw_fini; + } + + /* + * Initialize the completion queues. Must be done before hw_init, + * because there the addresses of the completion queues are being + * passed as arguments to request_irq + */ + hdev->completion_queue = + kcalloc(hdev->asic_prop.completion_queues_count, + sizeof(*hdev->completion_queue), GFP_KERNEL); + + if (!hdev->completion_queue) { + dev_err(hdev->dev, "failed to allocate completion queues\n"); + rc = -ENOMEM; + goto hw_queues_destroy; + } + + for (i = 0, cq_ready_cnt = 0; + i < hdev->asic_prop.completion_queues_count; + i++, cq_ready_cnt++) { + rc = hl_cq_init(hdev, &hdev->completion_queue[i], i); + if (rc) { + dev_err(hdev->dev, + "failed to initialize completion queue\n"); + goto cq_fini; + } + } + /* Allocate the kernel context */ hdev->kernel_ctx = kzalloc(sizeof(*hdev->kernel_ctx), GFP_KERNEL); if (!hdev->kernel_ctx) { rc = -ENOMEM; - goto sw_fini; + goto cq_fini; } hdev->user_ctx = NULL; @@ -328,6 +378,14 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hdev->disabled = false; + /* Check that the communication with the device is working */ + rc = hdev->asic_funcs->test_queues(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to detect if device is alive\n"); + rc = 0; + goto out_disabled; + } + dev_notice(hdev->dev, "Successfully added device to habanalabs driver\n"); @@ -339,6 +397,12 @@ release_ctx: "kernel ctx is still alive on initialization failure\n"); free_ctx: kfree(hdev->kernel_ctx); +cq_fini: + for (i = 0 ; i < cq_ready_cnt ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); +hw_queues_destroy: + hl_hw_queues_destroy(hdev); sw_fini: hdev->asic_funcs->sw_fini(hdev); early_fini: @@ -368,6 +432,7 @@ out_disabled: */ void hl_device_fini(struct hl_device *hdev) { + int i; dev_info(hdev->dev, "Removing device\n"); /* Mark device as disabled */ @@ -382,6 +447,12 @@ void hl_device_fini(struct hl_device *hdev) /* Reset the H/W. 
It will be in idle state after this returns */ hdev->asic_funcs->hw_fini(hdev, true); + for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) + hl_cq_fini(hdev, &hdev->completion_queue[i]); + kfree(hdev->completion_queue); + + hl_hw_queues_destroy(hdev); + /* Call ASIC S/W finalize function */ hdev->asic_funcs->sw_fini(hdev); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 1fa5b91fd703..a45a183d4e5c 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -90,6 +90,26 @@ static void goya_get_fixed_properties(struct hl_device *hdev) { struct asic_fixed_properties *prop = &hdev->asic_prop; + int i; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_EXT; + prop->hw_queues_props[i].kmd_only = 0; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES ; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_CPU; + prop->hw_queues_props[i].kmd_only = 1; + } + + for (; i < NUMBER_OF_EXT_HW_QUEUES + NUMBER_OF_CPU_HW_QUEUES + + NUMBER_OF_INT_HW_QUEUES; i++) { + prop->hw_queues_props[i].type = QUEUE_TYPE_INT; + prop->hw_queues_props[i].kmd_only = 0; + } + + for (; i < HL_MAX_QUEUES; i++) + prop->hw_queues_props[i].type = QUEUE_TYPE_NA; prop->completion_queues_count = NUMBER_OF_CMPLT_QUEUES; @@ -118,6 +138,18 @@ static void goya_get_fixed_properties(struct hl_device *hdev) prop->high_pll = PLL_HIGH_DEFAULT; } +int goya_send_pci_access_msg(struct hl_device *hdev, u32 opcode) +{ + struct armcp_packet pkt; + + memset(&pkt, 0, sizeof(pkt)); + + pkt.ctl = opcode << ARMCP_PKT_CTL_OPCODE_SHIFT; + + return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, + sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL); +} + /* * goya_pci_bars_map - Map PCI BARS of Goya device * @@ -510,6 +542,8 @@ static int goya_sw_init(struct hl_device *hdev) if (!goya) return -ENOMEM; + goya->test_cpu_queue = goya_test_cpu_queue; + /* according to goya_init_iatu */ goya->ddr_bar_cur_addr = DRAM_PHYS_BASE; hdev->asic_specific = goya; @@ -596,6 +630,311 @@ int goya_sw_fini(struct hl_device *hdev) return 0; } +static void goya_init_dma_qman(struct hl_device *hdev, int dma_id, + dma_addr_t bus_address) +{ + struct goya_device *goya = hdev->asic_specific; + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = dma_id * (mmDMA_QM_1_PQ_PI - mmDMA_QM_0_PQ_PI); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_QM_0_PQ_BASE_LO + reg_off, lower_32_bits(bus_address)); + WREG32(mmDMA_QM_0_PQ_BASE_HI + reg_off, upper_32_bits(bus_address)); + + WREG32(mmDMA_QM_0_PQ_SIZE + reg_off, ilog2(HL_QUEUE_LENGTH)); + WREG32(mmDMA_QM_0_PQ_PI + reg_off, 0); + WREG32(mmDMA_QM_0_PQ_CI + reg_off, 0); + + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmDMA_QM_0_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_QM_0_GLBL_ERR_ADDR_HI + reg_off, 
gic_base_hi); + WREG32(mmDMA_QM_0_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_QM + dma_id); + + /* PQ has buffer of 2 cache lines, while CQ has 8 lines */ + WREG32(mmDMA_QM_0_PQ_CFG1 + reg_off, 0x00020002); + WREG32(mmDMA_QM_0_CQ_CFG1 + reg_off, 0x00080008); + + if (dma_id == 0) + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, QMAN_DMA_FULLY_TRUSTED); + else + if (goya->hw_cap_initialized & HW_CAP_MMU) + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, + QMAN_DMA_PARTLY_TRUSTED); + else + WREG32(mmDMA_QM_0_GLBL_PROT + reg_off, + QMAN_DMA_FULLY_TRUSTED); + + WREG32(mmDMA_QM_0_GLBL_ERR_CFG + reg_off, QMAN_DMA_ERR_MSG_EN); + WREG32(mmDMA_QM_0_GLBL_CFG0 + reg_off, QMAN_DMA_ENABLE); +} + +static void goya_init_dma_ch(struct hl_device *hdev, int dma_id) +{ + u32 gic_base_lo, gic_base_hi; + u64 sob_addr; + u32 reg_off = dma_id * (mmDMA_CH_1_CFG1 - mmDMA_CH_0_CFG1); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmDMA_CH_0_ERRMSG_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmDMA_CH_0_ERRMSG_ADDR_HI + reg_off, gic_base_hi); + WREG32(mmDMA_CH_0_ERRMSG_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_DMA0_CH + dma_id); + + if (dma_id) { + sob_addr = CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1000 + + (dma_id - 1) * 4; + WREG32(mmDMA_CH_0_WR_COMP_ADDR_LO + reg_off, + lower_32_bits(sob_addr)); + WREG32(mmDMA_CH_0_WR_COMP_ADDR_HI + reg_off, + upper_32_bits(sob_addr)); + WREG32(mmDMA_CH_0_WR_COMP_WDATA + reg_off, 0x80000001); + } +} + +/* + * goya_init_dma_qmans - Initialize QMAN DMA registers + * + * @hdev: pointer to hl_device structure + * + * Initialize the H/W registers of the QMAN DMA channels + * + */ +static void goya_init_dma_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + struct hl_hw_queue *q; + dma_addr_t bus_address; + int i; + + if (goya->hw_cap_initialized & HW_CAP_DMA) + return; + + q = &hdev->kernel_queues[0]; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++, q++) { + bus_address = q->bus_address + + hdev->asic_prop.host_phys_base_address; + + goya_init_dma_qman(hdev, i, bus_address); + goya_init_dma_ch(hdev, i); + } + + goya->hw_cap_initialized |= HW_CAP_DMA; +} + +/* + * goya_disable_external_queues - Disable external queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_external_queues(struct hl_device *hdev) +{ + WREG32(mmDMA_QM_0_GLBL_CFG0, 0); + WREG32(mmDMA_QM_1_GLBL_CFG0, 0); + WREG32(mmDMA_QM_2_GLBL_CFG0, 0); + WREG32(mmDMA_QM_3_GLBL_CFG0, 0); + WREG32(mmDMA_QM_4_GLBL_CFG0, 0); +} + +static int goya_stop_queue(struct hl_device *hdev, u32 cfg_reg, + u32 cp_sts_reg, u32 glbl_sts0_reg) +{ + int rc; + u32 status; + + /* use the values of TPC0 as they are all the same*/ + + WREG32(cfg_reg, 1 << TPC0_QM_GLBL_CFG1_CP_STOP_SHIFT); + + status = RREG32(cp_sts_reg); + if (status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK) { + rc = hl_poll_timeout( + hdev, + cp_sts_reg, + status, + !(status & TPC0_QM_CP_STS_FENCE_IN_PROGRESS_MASK), + 1000, + QMAN_FENCE_TIMEOUT_USEC); + + /* if QMAN is stuck in fence no need to check for stop */ + if (rc) + return 0; + } + + rc = hl_poll_timeout( + hdev, + glbl_sts0_reg, + status, + (status & TPC0_QM_GLBL_STS0_CP_IS_STOP_MASK), + 1000, + QMAN_STOP_TIMEOUT_USEC); + + if (rc) { + dev_err(hdev->dev, + "Timeout while waiting for QMAN to stop\n"); + return -EINVAL; + } + + return 0; +} + +/* + * goya_stop_external_queues - Stop external queues + * + * @hdev: pointer to hl_device structure + * + * 
Returns 0 on success + * + */ +static int goya_stop_external_queues(struct hl_device *hdev) +{ + int rc, retval = 0; + + rc = goya_stop_queue(hdev, + mmDMA_QM_0_GLBL_CFG1, + mmDMA_QM_0_CP_STS, + mmDMA_QM_0_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 0\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_1_GLBL_CFG1, + mmDMA_QM_1_CP_STS, + mmDMA_QM_1_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 1\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_2_GLBL_CFG1, + mmDMA_QM_2_CP_STS, + mmDMA_QM_2_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 2\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_3_GLBL_CFG1, + mmDMA_QM_3_CP_STS, + mmDMA_QM_3_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 3\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmDMA_QM_4_GLBL_CFG1, + mmDMA_QM_4_CP_STS, + mmDMA_QM_4_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop DMA QMAN 4\n"); + retval = -EIO; + } + + return retval; +} + +static void goya_resume_external_queues(struct hl_device *hdev) +{ + WREG32(mmDMA_QM_0_GLBL_CFG1, 0); + WREG32(mmDMA_QM_1_GLBL_CFG1, 0); + WREG32(mmDMA_QM_2_GLBL_CFG1, 0); + WREG32(mmDMA_QM_3_GLBL_CFG1, 0); + WREG32(mmDMA_QM_4_GLBL_CFG1, 0); +} + +/* + * goya_init_cpu_queues - Initialize PQ/CQ/EQ of CPU + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +int goya_init_cpu_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + dma_addr_t bus_address; + u32 status; + struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GOYA_QUEUE_ID_CPU_PQ]; + int err; + + if (!hdev->cpu_queues_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_CPU_Q) + return 0; + + bus_address = cpu_pq->bus_address + + hdev->asic_prop.host_phys_base_address; + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_0, lower_32_bits(bus_address)); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_1, upper_32_bits(bus_address)); + + bus_address = hdev->cpu_accessible_dma_address + + hdev->asic_prop.host_phys_base_address; + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_8, lower_32_bits(bus_address)); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_9, upper_32_bits(bus_address)); + + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_5, HL_QUEUE_SIZE_IN_BYTES); + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_10, CPU_ACCESSIBLE_MEM_SIZE); + + /* Used for EQ CI */ + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, 0); + + WREG32(mmCPU_IF_PF_PQ_PI, 0); + + WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, PQ_INIT_STATUS_READY_FOR_CP); + + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); + + err = hl_poll_timeout( + hdev, + mmPSOC_GLOBAL_CONF_SCRATCHPAD_7, + status, + (status == PQ_INIT_STATUS_READY_FOR_HOST), + 1000, + GOYA_CPU_TIMEOUT_USEC); + + if (err) { + dev_err(hdev->dev, + "Failed to communicate with ARM CPU (ArmCP timeout)\n"); + return -EIO; + } + + goya->hw_cap_initialized |= HW_CAP_CPU_Q; + return 0; +} + static void goya_set_pll_refclk(struct hl_device *hdev) { WREG32(mmCPU_PLL_DIV_SEL_0, 0x0); @@ -1023,144 +1362,644 @@ static void goya_init_golden_registers(struct hl_device *hdev) goya->hw_cap_initialized |= HW_CAP_GOLDEN; } - -/* - * goya_push_fw_to_device - Push FW code to device - * - * @hdev: pointer to hl_device structure - * - * Copy fw code from firmware file to device memory. 
- * Returns 0 on success - * - */ -static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name, - void __iomem *dst) +static void goya_init_mme_qman(struct hl_device *hdev) { - const struct firmware *fw; - const u64 *fw_data; - size_t fw_size, i; - int rc; + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; - rc = request_firmware(&fw, fw_name, hdev->dev); + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); - if (rc) { - dev_err(hdev->dev, "Failed to request %s\n", fw_name); - goto out; - } + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); - fw_size = fw->size; - if ((fw_size % 4) != 0) { - dev_err(hdev->dev, "illegal %s firmware size %zu\n", - fw_name, fw_size); - rc = -EINVAL; - goto out; - } + qman_base_addr = hdev->asic_prop.sram_base_address + + MME_QMAN_BASE_OFFSET; - dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + WREG32(mmMME_QM_PQ_BASE_LO, lower_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_BASE_HI, upper_32_bits(qman_base_addr)); + WREG32(mmMME_QM_PQ_SIZE, ilog2(MME_QMAN_LENGTH)); + WREG32(mmMME_QM_PQ_PI, 0); + WREG32(mmMME_QM_PQ_CI, 0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_LO_OFFSET, 0x10C0); + WREG32(mmMME_QM_CP_LDMA_SRC_BASE_HI_OFFSET, 0x10C4); + WREG32(mmMME_QM_CP_LDMA_TSIZE_OFFSET, 0x10C8); + WREG32(mmMME_QM_CP_LDMA_COMMIT_OFFSET, 0x10CC); - fw_data = (const u64 *) fw->data; + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_QM_CP_MSG_BASE1_ADDR_HI, so_base_hi); - if ((fw->size % 8) != 0) - fw_size -= 8; + /* QMAN CQ has 8 cache lines */ + WREG32(mmMME_QM_CQ_CFG1, 0x00080008); - for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) { - if (!(i & (0x80000 - 1))) { - dev_dbg(hdev->dev, - "copied so far %zu out of %zu for %s firmware", - i, fw_size, fw_name); - usleep_range(20, 100); - } + WREG32(mmMME_QM_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_QM_GLBL_ERR_ADDR_HI, gic_base_hi); - writeq(*fw_data, dst); - } + WREG32(mmMME_QM_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_QM); - if ((fw->size % 8) != 0) - writel(*(const u32 *) fw_data, dst); + WREG32(mmMME_QM_GLBL_ERR_CFG, QMAN_MME_ERR_MSG_EN); -out: - release_firmware(fw); - return rc; + WREG32(mmMME_QM_GLBL_PROT, QMAN_MME_ERR_PROT); + + WREG32(mmMME_QM_GLBL_CFG0, QMAN_MME_ENABLE); } -static int goya_pldm_init_cpu(struct hl_device *hdev) +static void goya_init_mme_cmdq(struct hl_device *hdev) { - char fw_name[200]; - void __iomem *dst; - u32 val, unit_rst_val; - int rc; + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; - /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ - goya_init_golden_registers(hdev); + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); - /* Put ARM cores into reset */ - WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); - val = 
RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); - /* Reset the CA53 MACRO */ - unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); - WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); - WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); - val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + qman_base_addr = hdev->asic_prop.sram_base_address + + MME_QMAN_BASE_OFFSET; - snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin"); - dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET; - rc = goya_push_fw_to_device(hdev, fw_name, dst); - if (rc) - return rc; + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_LO, mtr_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE0_ADDR_HI, mtr_base_hi); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_LO, so_base_lo); + WREG32(mmMME_CMDQ_CP_MSG_BASE1_ADDR_HI, so_base_hi); - snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); - dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; - rc = goya_push_fw_to_device(hdev, fw_name, dst); - if (rc) - return rc; + /* CMDQ CQ has 20 cache lines */ + WREG32(mmMME_CMDQ_CQ_CFG1, 0x00140014); - WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY); - WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA); + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_LO, gic_base_lo); + WREG32(mmMME_CMDQ_GLBL_ERR_ADDR_HI, gic_base_hi); - WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0, - lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); - WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0, - upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); + WREG32(mmMME_CMDQ_GLBL_ERR_WDATA, GOYA_ASYNC_EVENT_ID_MME_CMDQ); - /* Release ARM core 0 from reset */ - WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, - CPU_RESET_CORE0_DEASSERT); - val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + WREG32(mmMME_CMDQ_GLBL_ERR_CFG, CMDQ_MME_ERR_MSG_EN); - return 0; + WREG32(mmMME_CMDQ_GLBL_PROT, CMDQ_MME_ERR_PROT); + + WREG32(mmMME_CMDQ_GLBL_CFG0, CMDQ_MME_ENABLE); } -/* - * FW component passes an offset from SRAM_BASE_ADDR in SCRATCHPAD_xx. - * The version string should be located by that offset. 
- */ -static void goya_read_device_fw_version(struct hl_device *hdev, - enum goya_fw_component fwc) +static void goya_init_mme_qmans(struct hl_device *hdev) { - const char *name; - u32 ver_off; - char *dest; + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; - switch (fwc) { - case FW_COMP_UBOOT: - ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29); - dest = hdev->asic_prop.uboot_ver; - name = "U-Boot"; - break; - case FW_COMP_PREBOOT: - ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28); - dest = hdev->asic_prop.preboot_ver; - name = "Preboot"; - break; - default: - dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); + if (goya->hw_cap_initialized & HW_CAP_MME) return; - } - ver_off &= ~((u32)SRAM_BASE_ADDR); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); - if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) { + WREG32(mmMME_SM_BASE_ADDRESS_LOW, so_base_lo); + WREG32(mmMME_SM_BASE_ADDRESS_HIGH, so_base_hi); + + goya_init_mme_qman(hdev); + goya_init_mme_cmdq(hdev); + + goya->hw_cap_initialized |= HW_CAP_MME; +} + +static void goya_init_tpc_qman(struct hl_device *hdev, u32 base_off, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u64 qman_base_addr; + u32 reg_off = tpc_id * (mmTPC1_QM_PQ_PI - mmTPC0_QM_PQ_PI); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + qman_base_addr = hdev->asic_prop.sram_base_address + base_off; + + WREG32(mmTPC0_QM_PQ_BASE_LO + reg_off, lower_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_BASE_HI + reg_off, upper_32_bits(qman_base_addr)); + WREG32(mmTPC0_QM_PQ_SIZE + reg_off, ilog2(TPC_QMAN_LENGTH)); + WREG32(mmTPC0_QM_PQ_PI + reg_off, 0); + WREG32(mmTPC0_QM_PQ_CI + reg_off, 0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_LO_OFFSET + reg_off, 0x10C0); + WREG32(mmTPC0_QM_CP_LDMA_SRC_BASE_HI_OFFSET + reg_off, 0x10C4); + WREG32(mmTPC0_QM_CP_LDMA_TSIZE_OFFSET + reg_off, 0x10C8); + WREG32(mmTPC0_QM_CP_LDMA_COMMIT_OFFSET + reg_off, 0x10CC); + + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmTPC0_QM_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_QM_CQ_CFG1 + reg_off, 0x00080008); + + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_QM_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_QM_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_QM + tpc_id); + + WREG32(mmTPC0_QM_GLBL_ERR_CFG + reg_off, QMAN_TPC_ERR_MSG_EN); + + WREG32(mmTPC0_QM_GLBL_PROT + reg_off, QMAN_TPC_ERR_PROT); + + WREG32(mmTPC0_QM_GLBL_CFG0 + reg_off, QMAN_TPC_ENABLE); +} + +static void goya_init_tpc_cmdq(struct hl_device *hdev, int tpc_id) +{ + u32 mtr_base_lo, mtr_base_hi; + u32 so_base_lo, so_base_hi; + u32 gic_base_lo, gic_base_hi; + u32 reg_off = tpc_id * (mmTPC1_CMDQ_CQ_CFG1 - mmTPC0_CMDQ_CQ_CFG1); + + mtr_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + mtr_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_MON_PAY_ADDRL_0); + 
so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + gic_base_lo = + lower_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + gic_base_hi = + upper_32_bits(CFG_BASE + mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR); + + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_LO + reg_off, mtr_base_lo); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE0_ADDR_HI + reg_off, mtr_base_hi); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_LO + reg_off, so_base_lo); + WREG32(mmTPC0_CMDQ_CP_MSG_BASE1_ADDR_HI + reg_off, so_base_hi); + + WREG32(mmTPC0_CMDQ_CQ_CFG1 + reg_off, 0x00140014); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_LO + reg_off, gic_base_lo); + WREG32(mmTPC0_CMDQ_GLBL_ERR_ADDR_HI + reg_off, gic_base_hi); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_WDATA + reg_off, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ + tpc_id); + + WREG32(mmTPC0_CMDQ_GLBL_ERR_CFG + reg_off, CMDQ_TPC_ERR_MSG_EN); + + WREG32(mmTPC0_CMDQ_GLBL_PROT + reg_off, CMDQ_TPC_ERR_PROT); + + WREG32(mmTPC0_CMDQ_GLBL_CFG0 + reg_off, CMDQ_TPC_ENABLE); +} + +static void goya_init_tpc_qmans(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + u32 so_base_lo, so_base_hi; + u32 cfg_off = mmTPC1_CFG_SM_BASE_ADDRESS_LOW - + mmTPC0_CFG_SM_BASE_ADDRESS_LOW; + int i; + + if (goya->hw_cap_initialized & HW_CAP_TPC) + return; + + so_base_lo = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + so_base_hi = upper_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_LOW + i * cfg_off, + so_base_lo); + WREG32(mmTPC0_CFG_SM_BASE_ADDRESS_HIGH + i * cfg_off, + so_base_hi); + } + + goya_init_tpc_qman(hdev, TPC0_QMAN_BASE_OFFSET, 0); + goya_init_tpc_qman(hdev, TPC1_QMAN_BASE_OFFSET, 1); + goya_init_tpc_qman(hdev, TPC2_QMAN_BASE_OFFSET, 2); + goya_init_tpc_qman(hdev, TPC3_QMAN_BASE_OFFSET, 3); + goya_init_tpc_qman(hdev, TPC4_QMAN_BASE_OFFSET, 4); + goya_init_tpc_qman(hdev, TPC5_QMAN_BASE_OFFSET, 5); + goya_init_tpc_qman(hdev, TPC6_QMAN_BASE_OFFSET, 6); + goya_init_tpc_qman(hdev, TPC7_QMAN_BASE_OFFSET, 7); + + for (i = 0 ; i < TPC_MAX_NUM ; i++) + goya_init_tpc_cmdq(hdev, i); + + goya->hw_cap_initialized |= HW_CAP_TPC; +} + +/* + * goya_disable_internal_queues - Disable internal queues + * + * @hdev: pointer to hl_device structure + * + */ +static void goya_disable_internal_queues(struct hl_device *hdev) +{ + WREG32(mmMME_QM_GLBL_CFG0, 0); + WREG32(mmMME_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC0_QM_GLBL_CFG0, 0); + WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC1_QM_GLBL_CFG0, 0); + WREG32(mmTPC1_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC2_QM_GLBL_CFG0, 0); + WREG32(mmTPC2_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC3_QM_GLBL_CFG0, 0); + WREG32(mmTPC3_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC4_QM_GLBL_CFG0, 0); + WREG32(mmTPC4_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC5_QM_GLBL_CFG0, 0); + WREG32(mmTPC5_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC6_QM_GLBL_CFG0, 0); + WREG32(mmTPC6_CMDQ_GLBL_CFG0, 0); + + WREG32(mmTPC7_QM_GLBL_CFG0, 0); + WREG32(mmTPC7_CMDQ_GLBL_CFG0, 0); +} + +/* + * goya_stop_internal_queues - Stop internal queues + * + * @hdev: pointer to hl_device structure + * + * Returns 0 on success + * + */ +static int goya_stop_internal_queues(struct hl_device *hdev) +{ + int rc, retval = 0; + + /* + * Each queue (QMAN) is a separate H/W logic. 
That means that each + * QMAN can be stopped independently and failure to stop one does NOT + * mandate we should not try to stop other QMANs + */ + + rc = goya_stop_queue(hdev, + mmMME_QM_GLBL_CFG1, + mmMME_QM_CP_STS, + mmMME_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop MME QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmMME_CMDQ_GLBL_CFG1, + mmMME_CMDQ_CP_STS, + mmMME_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop MME CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC0_QM_GLBL_CFG1, + mmTPC0_QM_CP_STS, + mmTPC0_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 0 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC0_CMDQ_GLBL_CFG1, + mmTPC0_CMDQ_CP_STS, + mmTPC0_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 0 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC1_QM_GLBL_CFG1, + mmTPC1_QM_CP_STS, + mmTPC1_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 1 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC1_CMDQ_GLBL_CFG1, + mmTPC1_CMDQ_CP_STS, + mmTPC1_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 1 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC2_QM_GLBL_CFG1, + mmTPC2_QM_CP_STS, + mmTPC2_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 2 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC2_CMDQ_GLBL_CFG1, + mmTPC2_CMDQ_CP_STS, + mmTPC2_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 2 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC3_QM_GLBL_CFG1, + mmTPC3_QM_CP_STS, + mmTPC3_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 3 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC3_CMDQ_GLBL_CFG1, + mmTPC3_CMDQ_CP_STS, + mmTPC3_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 3 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC4_QM_GLBL_CFG1, + mmTPC4_QM_CP_STS, + mmTPC4_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 4 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC4_CMDQ_GLBL_CFG1, + mmTPC4_CMDQ_CP_STS, + mmTPC4_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 4 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC5_QM_GLBL_CFG1, + mmTPC5_QM_CP_STS, + mmTPC5_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 5 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC5_CMDQ_GLBL_CFG1, + mmTPC5_CMDQ_CP_STS, + mmTPC5_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 5 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC6_QM_GLBL_CFG1, + mmTPC6_QM_CP_STS, + mmTPC6_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 6 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC6_CMDQ_GLBL_CFG1, + mmTPC6_CMDQ_CP_STS, + mmTPC6_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 6 CMDQ\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC7_QM_GLBL_CFG1, + mmTPC7_QM_CP_STS, + mmTPC7_QM_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 7 QMAN\n"); + retval = -EIO; + } + + rc = goya_stop_queue(hdev, + mmTPC7_CMDQ_GLBL_CFG1, + mmTPC7_CMDQ_CP_STS, + mmTPC7_CMDQ_GLBL_STS0); + + if (rc) { + dev_err(hdev->dev, "failed to stop TPC 7 CMDQ\n"); + retval = -EIO; + } + + return 
retval; +} + +static void goya_resume_internal_queues(struct hl_device *hdev) +{ + WREG32(mmMME_QM_GLBL_CFG1, 0); + WREG32(mmMME_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC0_QM_GLBL_CFG1, 0); + WREG32(mmTPC0_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC1_QM_GLBL_CFG1, 0); + WREG32(mmTPC1_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC2_QM_GLBL_CFG1, 0); + WREG32(mmTPC2_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC3_QM_GLBL_CFG1, 0); + WREG32(mmTPC3_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC4_QM_GLBL_CFG1, 0); + WREG32(mmTPC4_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC5_QM_GLBL_CFG1, 0); + WREG32(mmTPC5_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC6_QM_GLBL_CFG1, 0); + WREG32(mmTPC6_CMDQ_GLBL_CFG1, 0); + + WREG32(mmTPC7_QM_GLBL_CFG1, 0); + WREG32(mmTPC7_CMDQ_GLBL_CFG1, 0); +} + + +/* + * goya_push_fw_to_device - Push FW code to device + * + * @hdev: pointer to hl_device structure + * + * Copy fw code from firmware file to device memory. + * Returns 0 on success + * + */ +static int goya_push_fw_to_device(struct hl_device *hdev, const char *fw_name, + void __iomem *dst) +{ + const struct firmware *fw; + const u64 *fw_data; + size_t fw_size, i; + int rc; + + rc = request_firmware(&fw, fw_name, hdev->dev); + + if (rc) { + dev_err(hdev->dev, "Failed to request %s\n", fw_name); + goto out; + } + + fw_size = fw->size; + if ((fw_size % 4) != 0) { + dev_err(hdev->dev, "illegal %s firmware size %zu\n", + fw_name, fw_size); + rc = -EINVAL; + goto out; + } + + dev_dbg(hdev->dev, "%s firmware size == %zu\n", fw_name, fw_size); + + fw_data = (const u64 *) fw->data; + + if ((fw->size % 8) != 0) + fw_size -= 8; + + for (i = 0 ; i < fw_size ; i += 8, fw_data++, dst += 8) { + if (!(i & (0x80000 - 1))) { + dev_dbg(hdev->dev, + "copied so far %zu out of %zu for %s firmware", + i, fw_size, fw_name); + usleep_range(20, 100); + } + + writeq(*fw_data, dst); + } + + if ((fw->size % 8) != 0) + writel(*(const u32 *) fw_data, dst); + +out: + release_firmware(fw); + return rc; +} + +static int goya_pldm_init_cpu(struct hl_device *hdev) +{ + char fw_name[200]; + void __iomem *dst; + u32 val, unit_rst_val; + int rc; + + /* Must initialize SRAM scrambler before pushing u-boot to SRAM */ + goya_init_golden_registers(hdev); + + /* Put ARM cores into reset */ + WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, CPU_RESET_ASSERT); + val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + + /* Reset the CA53 MACRO */ + unit_rst_val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, CA53_RESET); + val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + WREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N, unit_rst_val); + val = RREG32(mmPSOC_GLOBAL_CONF_UNIT_RST_N); + + snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-u-boot.bin"); + dst = hdev->pcie_bar[SRAM_CFG_BAR_ID] + UBOOT_FW_OFFSET; + rc = goya_push_fw_to_device(hdev, fw_name, dst); + if (rc) + return rc; + + snprintf(fw_name, sizeof(fw_name), "habanalabs/goya/goya-fit.itb"); + dst = hdev->pcie_bar[DDR_BAR_ID] + LINUX_FW_OFFSET; + rc = goya_push_fw_to_device(hdev, fw_name, dst); + if (rc) + return rc; + + WREG32(mmPSOC_GLOBAL_CONF_UBOOT_MAGIC, KMD_MSG_FIT_RDY); + WREG32(mmPSOC_GLOBAL_CONF_WARM_REBOOT, CPU_BOOT_STATUS_NA); + + WREG32(mmCPU_CA53_CFG_RST_ADDR_LSB_0, + lower_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); + WREG32(mmCPU_CA53_CFG_RST_ADDR_MSB_0, + upper_32_bits(SRAM_BASE_ADDR + UBOOT_FW_OFFSET)); + + /* Release ARM core 0 from reset */ + WREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL, + CPU_RESET_CORE0_DEASSERT); + val = RREG32(mmCPU_CA53_CFG_ARM_RST_CONTROL); + + return 0; +} + +/* + * FW component passes an offset from 
SRAM_BASE_ADDR in SCRATCHPAD_xx. + * The version string should be located by that offset. + */ +static void goya_read_device_fw_version(struct hl_device *hdev, + enum goya_fw_component fwc) +{ + const char *name; + u32 ver_off; + char *dest; + + switch (fwc) { + case FW_COMP_UBOOT: + ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_29); + dest = hdev->asic_prop.uboot_ver; + name = "U-Boot"; + break; + case FW_COMP_PREBOOT: + ver_off = RREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_28); + dest = hdev->asic_prop.preboot_ver; + name = "Preboot"; + break; + default: + dev_warn(hdev->dev, "Undefined FW component: %d\n", fwc); + return; + } + + ver_off &= ~((u32)SRAM_BASE_ADDR); + + if (ver_off < SRAM_SIZE - VERSION_MAX_LEN) { memcpy_fromio(dest, hdev->pcie_bar[SRAM_CFG_BAR_ID] + ver_off, VERSION_MAX_LEN); } else { @@ -1344,6 +2183,19 @@ static int goya_hw_init(struct hl_device *hdev) goya_init_security(hdev); + goya_init_dma_qmans(hdev); + + goya_init_mme_qmans(hdev); + + goya_init_tpc_qmans(hdev); + + rc = goya_init_cpu_queues(hdev); + if (rc) { + dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n", + rc); + goto disable_queues; + } + /* CPU initialization is finished, we can now move to 48 bit DMA mask */ rc = pci_set_dma_mask(hdev->pdev, DMA_BIT_MASK(48)); if (rc) { @@ -1352,7 +2204,7 @@ static int goya_hw_init(struct hl_device *hdev) if (rc) { dev_err(hdev->dev, "Unable to set pci dma mask to 32 bits\n"); - return rc; + goto disable_pci_access; } } @@ -1364,7 +2216,7 @@ static int goya_hw_init(struct hl_device *hdev) if (rc) { dev_err(hdev->dev, "Unable to set pci consistent dma mask to 32 bits\n"); - return rc; + goto disable_pci_access; } } @@ -1372,6 +2224,14 @@ static int goya_hw_init(struct hl_device *hdev) val = RREG32(mmPCIE_DBI_DEVICE_ID_VENDOR_ID_REG); return 0; + +disable_pci_access: + goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); +disable_queues: + goya_disable_internal_queues(hdev); + goya_disable_external_queues(hdev); + + return rc; } /* @@ -1460,12 +2320,40 @@ static void goya_hw_fini(struct hl_device *hdev, bool hard_reset) int goya_suspend(struct hl_device *hdev) { - return 0; + int rc; + + rc = goya_stop_internal_queues(hdev); + + if (rc) { + dev_err(hdev->dev, "failed to stop internal queues\n"); + return rc; + } + + rc = goya_stop_external_queues(hdev); + + if (rc) { + dev_err(hdev->dev, "failed to stop external queues\n"); + return rc; + } + + rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to disable PCI access from CPU\n"); + + return rc; } int goya_resume(struct hl_device *hdev) { - return 0; + int rc; + + goya_resume_external_queues(hdev); + goya_resume_internal_queues(hdev); + + rc = goya_send_pci_access_msg(hdev, ARMCP_PACKET_ENABLE_PCI_ACCESS); + if (rc) + dev_err(hdev->dev, "Failed to enable PCI access from CPU\n"); + return rc; } int goya_mmap(struct hl_fpriv *hpriv, struct vm_area_struct *vma) @@ -1489,6 +2377,101 @@ int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, return rc; } +void goya_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) +{ + u32 db_reg_offset, db_value; + bool invalid_queue = false; + + switch (hw_queue_id) { + case GOYA_QUEUE_ID_DMA_0: + db_reg_offset = mmDMA_QM_0_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_1: + db_reg_offset = mmDMA_QM_1_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_2: + db_reg_offset = mmDMA_QM_2_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_3: + db_reg_offset = mmDMA_QM_3_PQ_PI; + break; + + case GOYA_QUEUE_ID_DMA_4: + 
db_reg_offset = mmDMA_QM_4_PQ_PI; + break; + + case GOYA_QUEUE_ID_CPU_PQ: + if (hdev->cpu_queues_enable) + db_reg_offset = mmCPU_IF_PF_PQ_PI; + else + invalid_queue = true; + break; + + case GOYA_QUEUE_ID_MME: + db_reg_offset = mmMME_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC0: + db_reg_offset = mmTPC0_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC1: + db_reg_offset = mmTPC1_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC2: + db_reg_offset = mmTPC2_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC3: + db_reg_offset = mmTPC3_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC4: + db_reg_offset = mmTPC4_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC5: + db_reg_offset = mmTPC5_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC6: + db_reg_offset = mmTPC6_QM_PQ_PI; + break; + + case GOYA_QUEUE_ID_TPC7: + db_reg_offset = mmTPC7_QM_PQ_PI; + break; + + default: + invalid_queue = true; + } + + if (invalid_queue) { + /* Should never get here */ + dev_err(hdev->dev, "h/w queue %d is invalid. Can't set pi\n", + hw_queue_id); + return; + } + + db_value = pi; + + /* ring the doorbell */ + WREG32(db_reg_offset, db_value); + + if (hw_queue_id == GOYA_QUEUE_ID_CPU_PQ) + WREG32(mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR, + GOYA_ASYNC_EVENT_ID_PI_UPDATE); +} + +void goya_flush_pq_write(struct hl_device *hdev, u64 *pq, u64 exp_val) +{ + /* Not needed in Goya */ +} + void *goya_dma_alloc_coherent(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, gfp_t flags) { @@ -1501,6 +2484,315 @@ void goya_dma_free_coherent(struct hl_device *hdev, size_t size, void *cpu_addr, dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle); } +void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len) +{ + void *base; + u32 offset; + + *dma_handle = hdev->asic_prop.sram_base_address; + + base = hdev->pcie_bar[SRAM_CFG_BAR_ID]; + + switch (queue_id) { + case GOYA_QUEUE_ID_MME: + offset = MME_QMAN_BASE_OFFSET; + *queue_len = MME_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC0: + offset = TPC0_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC1: + offset = TPC1_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC2: + offset = TPC2_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC3: + offset = TPC3_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC4: + offset = TPC4_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC5: + offset = TPC5_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC6: + offset = TPC6_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + case GOYA_QUEUE_ID_TPC7: + offset = TPC7_QMAN_BASE_OFFSET; + *queue_len = TPC_QMAN_LENGTH; + break; + default: + dev_err(hdev->dev, "Got invalid queue id %d\n", queue_id); + return NULL; + } + + base += offset; + *dma_handle += offset; + + return base; +} + +int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result) +{ + struct goya_device *goya = hdev->asic_specific; + struct armcp_packet *pkt; + dma_addr_t pkt_dma_addr; + u32 tmp; + int rc = 0; + + if (!(goya->hw_cap_initialized & HW_CAP_CPU_Q)) { + if (result) + *result = 0; + return 0; + } + + if (len > CPU_CB_SIZE) { + dev_err(hdev->dev, "Invalid CPU message size of %d bytes\n", + len); + return -ENOMEM; + } + + pkt = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev, len, + &pkt_dma_addr); + if (!pkt) { + dev_err(hdev->dev, + "Failed to 
allocate DMA memory for packet to CPU\n"); + return -ENOMEM; + } + + memcpy(pkt, msg, len); + + mutex_lock(&hdev->send_cpu_message_lock); + + if (hdev->disabled) + goto out; + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_CPU_PQ, len, + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on CPU PQ (%d)\n", rc); + goto out; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence, + timeout, &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_CPU_PQ); + + if (rc == -ETIMEDOUT) { + dev_err(hdev->dev, + "Timeout while waiting for CPU packet fence\n"); + goto out; + } + + if (tmp == ARMCP_PACKET_FENCE_VAL) { + rc = (pkt->ctl & ARMCP_PKT_CTL_RC_MASK) >> + ARMCP_PKT_CTL_RC_SHIFT; + if (rc) { + dev_err(hdev->dev, + "F/W ERROR %d for CPU packet %d\n", + rc, (pkt->ctl & ARMCP_PKT_CTL_OPCODE_MASK) + >> ARMCP_PKT_CTL_OPCODE_SHIFT); + rc = -EINVAL; + } else if (result) { + *result = pkt->result; + } + } else { + dev_err(hdev->dev, "CPU packet wrong fence value\n"); + rc = -EINVAL; + } + +out: + mutex_unlock(&hdev->send_cpu_message_lock); + + hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev, len, pkt); + + return rc; +} + +int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) +{ + struct packet_msg_prot *fence_pkt; + dma_addr_t pkt_dma_addr; + u32 fence_val, tmp; + dma_addr_t fence_dma_addr; + u32 *fence_ptr; + int rc; + + fence_val = GOYA_QMAN0_FENCE_VAL; + + fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate memory for queue testing\n"); + return -ENOMEM; + } + + *fence_ptr = 0; + + fence_pkt = hdev->asic_funcs->dma_pool_zalloc(hdev, + sizeof(struct packet_msg_prot), + GFP_KERNEL, &pkt_dma_addr); + if (!fence_pkt) { + dev_err(hdev->dev, + "Failed to allocate packet for queue testing\n"); + rc = -ENOMEM; + goto free_fence_ptr; + } + + fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->value = fence_val; + fence_pkt->addr = fence_dma_addr + + hdev->asic_prop.host_phys_base_address; + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, + sizeof(struct packet_msg_prot), + pkt_dma_addr); + if (rc) { + dev_err(hdev->dev, + "Failed to send fence packet\n"); + goto free_pkt; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, + GOYA_TEST_QUEUE_WAIT_USEC, &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); + + if ((!rc) && (tmp == fence_val)) { + dev_info(hdev->dev, + "queue test on H/W queue %d succeeded\n", + hw_queue_id); + } else { + dev_err(hdev->dev, + "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n", + hw_queue_id, (unsigned long long) fence_dma_addr, tmp); + rc = -EINVAL; + } + +free_pkt: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_pkt, + pkt_dma_addr); +free_fence_ptr: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + return rc; +} + +int goya_test_cpu_queue(struct hl_device *hdev) +{ + struct armcp_packet test_pkt; + long result; + int rc; + + /* cpu_queues_enable flag is always checked in send cpu message */ + + memset(&test_pkt, 0, sizeof(test_pkt)); + + test_pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT; + test_pkt.value = ARMCP_PACKET_FENCE_VAL; + + rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt, + sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result); + + if (!rc) + dev_info(hdev->dev, "queue test on CPU queue succeeded\n"); + else + 
dev_err(hdev->dev, "CPU queue test failed (0x%08lX)\n", result); + + return rc; +} + +static int goya_test_queues(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + int i, rc, ret_val = 0; + + for (i = 0 ; i < NUMBER_OF_EXT_HW_QUEUES ; i++) { + rc = goya_test_queue(hdev, i); + if (rc) + ret_val = -EINVAL; + } + + if (hdev->cpu_queues_enable) { + rc = goya->test_cpu_queue(hdev); + if (rc) + ret_val = -EINVAL; + } + + return ret_val; +} + +void *goya_dma_pool_zalloc(struct hl_device *hdev, size_t size, gfp_t mem_flags, + dma_addr_t *dma_handle) +{ + if (size > GOYA_DMA_POOL_BLK_SIZE) + return NULL; + + return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle); +} + +void goya_dma_pool_free(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr) +{ + dma_pool_free(hdev->dma_pool, vaddr, dma_addr); +} + +void *goya_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, + dma_addr_t *dma_handle) +{ + u64 kernel_addr; + + /* roundup to CPU_PKT_SIZE */ + size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK; + + kernel_addr = gen_pool_alloc(hdev->cpu_accessible_dma_pool, size); + + *dma_handle = hdev->cpu_accessible_dma_address + + (kernel_addr - (u64) (uintptr_t) hdev->cpu_accessible_dma_mem); + + return (void *) (uintptr_t) kernel_addr; +} + +void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, + void *vaddr) +{ + /* roundup to CPU_PKT_SIZE */ + size = (size + (CPU_PKT_SIZE - 1)) & CPU_PKT_MASK; + + gen_pool_free(hdev->cpu_accessible_dma_pool, (u64) (uintptr_t) vaddr, + size); +} + + +static void goya_hw_queues_lock(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_lock(&goya->hw_queues_lock); +} + +static void goya_hw_queues_unlock(struct hl_device *hdev) +{ + struct goya_device *goya = hdev->asic_specific; + + spin_unlock(&goya->hw_queues_lock); +} + static const struct hl_asic_funcs goya_funcs = { .early_init = goya_early_init, .early_fini = goya_early_fini, @@ -1512,8 +2804,19 @@ static const struct hl_asic_funcs goya_funcs = { .resume = goya_resume, .mmap = goya_mmap, .cb_mmap = goya_cb_mmap, + .ring_doorbell = goya_ring_doorbell, + .flush_pq_write = goya_flush_pq_write, .dma_alloc_coherent = goya_dma_alloc_coherent, .dma_free_coherent = goya_dma_free_coherent, + .get_int_queue_base = goya_get_int_queue_base, + .test_queues = goya_test_queues, + .dma_pool_zalloc = goya_dma_pool_zalloc, + .dma_pool_free = goya_dma_pool_free, + .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, + .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, + .hw_queues_lock = goya_hw_queues_lock, + .hw_queues_unlock = goya_hw_queues_unlock, + .send_cpu_message = goya_send_cpu_message }; /* diff --git a/drivers/misc/habanalabs/goya/goyaP.h b/drivers/misc/habanalabs/goya/goyaP.h index 65cbb45d7083..791605cbecfe 100644 --- a/drivers/misc/habanalabs/goya/goyaP.h +++ b/drivers/misc/habanalabs/goya/goyaP.h @@ -11,7 +11,9 @@ #include #include "habanalabs.h" #include "include/hl_boot_if.h" +#include "include/goya/goya_packets.h" #include "include/goya/goya.h" +#include "include/goya/goya_async_events.h" #include "include/goya/goya_fw_if.h" #define NUMBER_OF_CMPLT_QUEUES 5 @@ -145,12 +147,17 @@ enum goya_fw_component { }; struct goya_device { + int (*test_cpu_queue)(struct hl_device *hdev); + /* TODO: remove hw_queues_lock after moving to scheduler code */ spinlock_t hw_queues_lock; u64 ddr_bar_cur_addr; u32 hw_cap_initialized; }; +int goya_test_cpu_queue(struct hl_device *hdev); +int 
goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, + u32 timeout, long *result); void goya_init_security(struct hl_device *hdev); #endif /* GOYAP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index e099f7a9dac2..2121babbebdc 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -9,6 +9,7 @@ #define HABANALABSP_H_ #include "include/armcp_if.h" +#include "include/qman_if.h" #define pr_fmt(fmt) "habanalabs: " fmt @@ -26,9 +27,36 @@ struct hl_device; struct hl_fpriv; +/** + * enum hl_queue_type - Supported QUEUE types. + * @QUEUE_TYPE_NA: queue is not available. + * @QUEUE_TYPE_EXT: external queue which is a DMA channel that may access the + * host. + * @QUEUE_TYPE_INT: internal queue that performs DMA inside the device's + * memories and/or operates the compute engines. + * @QUEUE_TYPE_CPU: S/W queue for communication with the device's CPU. + */ +enum hl_queue_type { + QUEUE_TYPE_NA, + QUEUE_TYPE_EXT, + QUEUE_TYPE_INT, + QUEUE_TYPE_CPU +}; + +/** + * struct hw_queue_properties - queue information. + * @type: queue type. + * @kmd_only: true if only KMD is allowed to send a job to this queue, false + * otherwise. + */ +struct hw_queue_properties { + enum hl_queue_type type; + u8 kmd_only; +}; /** * struct asic_fixed_properties - ASIC specific immutable properties. + * @hw_queues_props: H/W queues properties. * @uboot_ver: F/W U-boot version. * @preboot_ver: F/W Preboot version. * @sram_base_address: SRAM physical start address. @@ -59,6 +87,7 @@ struct hl_fpriv; * @tpc_enabled_mask: which TPCs are enabled. */ struct asic_fixed_properties { + struct hw_queue_properties hw_queues_props[HL_MAX_QUEUES]; char uboot_ver[VERSION_MAX_LEN]; char preboot_ver[VERSION_MAX_LEN]; u64 sram_base_address; @@ -132,7 +161,89 @@ struct hl_cb { }; +/* + * QUEUES + */ + +struct hl_cs_job; + +/* + * Currently, there are two limitations on the maximum length of a queue: + * + * 1. The memory footprint of the queue. The current allocated space for the + * queue is PAGE_SIZE. Because each entry in the queue is HL_BD_SIZE, + * the maximum length of the queue can be PAGE_SIZE / HL_BD_SIZE, + * which currently is 4096/16 = 256 entries. + * + * To increase that, we need either to decrease the size of the + * BD (difficult), or allocate more than a single page (easier). + * + * 2. Because the size of the JOB handle field in the BD CTL / completion queue + * is 10-bit, we can have up to 1024 open jobs per hardware queue. + * Therefore, each queue can hold up to 1024 entries. + * + * HL_QUEUE_LENGTH is in units of struct hl_bd. + * HL_QUEUE_LENGTH * sizeof(struct hl_bd) should be <= HL_PAGE_SIZE + */ + +#define HL_PAGE_SIZE 4096 /* minimum page size */ +/* Must be power of 2 (HL_PAGE_SIZE / HL_BD_SIZE) */ #define HL_QUEUE_LENGTH 256 +#define HL_QUEUE_SIZE_IN_BYTES (HL_QUEUE_LENGTH * HL_BD_SIZE) + +/* + * HL_CQ_LENGTH is in units of struct hl_cq_entry. + * HL_CQ_LENGTH should be <= HL_PAGE_SIZE + */ +#define HL_CQ_LENGTH HL_QUEUE_LENGTH +#define HL_CQ_SIZE_IN_BYTES (HL_CQ_LENGTH * HL_CQ_ENTRY_SIZE) + + + +/** + * struct hl_hw_queue - describes a H/W transport queue. + * @shadow_queue: pointer to a shadow queue that holds pointers to jobs. + * @queue_type: type of queue. + * @kernel_address: holds the queue's kernel virtual address. + * @bus_address: holds the queue's DMA address. + * @pi: holds the queue's pi value. + * @ci: holds the queue's ci value, AS CALCULATED BY THE DRIVER (not real ci). 
+ * @hw_queue_id: the id of the H/W queue. + * @int_queue_len: length of internal queue (number of entries). + * @valid: is the queue valid (we have array of 32 queues, not all of them + * exist). + */ +struct hl_hw_queue { + struct hl_cs_job **shadow_queue; + enum hl_queue_type queue_type; + u64 kernel_address; + dma_addr_t bus_address; + u32 pi; + u32 ci; + u32 hw_queue_id; + u16 int_queue_len; + u8 valid; +}; + +/** + * struct hl_cq - describes a completion queue + * @hdev: pointer to the device structure + * @kernel_address: holds the queue's kernel virtual address + * @bus_address: holds the queue's DMA address + * @hw_queue_id: the id of the matching H/W queue + * @ci: ci inside the queue + * @pi: pi inside the queue + * @free_slots_cnt: counter of free slots in queue + */ +struct hl_cq { + struct hl_device *hdev; + u64 kernel_address; + dma_addr_t bus_address; + u32 hw_queue_id; + u32 ci; + u32 pi; + atomic_t free_slots_cnt; +}; /* @@ -164,6 +275,8 @@ enum hl_asic_type { * @resume: handles IP specific H/W or SW changes for resume. * @mmap: mmap function, does nothing. * @cb_mmap: maps a CB. + * @ring_doorbell: increment PI on a given QMAN. + * @flush_pq_write: flush PQ entry write if necessary, WARN if flushing failed. * @dma_alloc_coherent: Allocate coherent DMA memory by calling * dma_alloc_coherent(). This is ASIC function because its * implementation is not trivial when the driver is loaded @@ -172,6 +285,16 @@ enum hl_asic_type { * This is ASIC function because its implementation is not * trivial when the driver is loaded in simulation mode * (not upstreamed). + * @get_int_queue_base: get the internal queue base address. + * @test_queues: run simple test on all queues for sanity check. + * @dma_pool_zalloc: small DMA allocation of coherent memory from DMA pool. + * size of allocation is HL_DMA_POOL_BLK_SIZE. + * @dma_pool_free: free small DMA allocation from pool. + * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool. + * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool. + * @hw_queues_lock: acquire H/W queues lock. + * @hw_queues_unlock: release H/W queues lock. + * @send_cpu_message: send buffer to ArmCP.
*/ struct hl_asic_funcs { int (*early_init)(struct hl_device *hdev); @@ -185,10 +308,27 @@ struct hl_asic_funcs { int (*mmap)(struct hl_fpriv *hpriv, struct vm_area_struct *vma); int (*cb_mmap)(struct hl_device *hdev, struct vm_area_struct *vma, u64 kaddress, phys_addr_t paddress, u32 size); + void (*ring_doorbell)(struct hl_device *hdev, u32 hw_queue_id, u32 pi); + void (*flush_pq_write)(struct hl_device *hdev, u64 *pq, u64 exp_val); void* (*dma_alloc_coherent)(struct hl_device *hdev, size_t size, dma_addr_t *dma_handle, gfp_t flag); void (*dma_free_coherent)(struct hl_device *hdev, size_t size, void *cpu_addr, dma_addr_t dma_handle); + void* (*get_int_queue_base)(struct hl_device *hdev, u32 queue_id, + dma_addr_t *dma_handle, u16 *queue_len); + int (*test_queues)(struct hl_device *hdev); + void* (*dma_pool_zalloc)(struct hl_device *hdev, size_t size, + gfp_t mem_flags, dma_addr_t *dma_handle); + void (*dma_pool_free)(struct hl_device *hdev, void *vaddr, + dma_addr_t dma_addr); + void* (*cpu_accessible_dma_pool_alloc)(struct hl_device *hdev, + size_t size, dma_addr_t *dma_handle); + void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, + size_t size, void *vaddr); + void (*hw_queues_lock)(struct hl_device *hdev); + void (*hw_queues_unlock)(struct hl_device *hdev); + int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, + u16 len, u32 timeout, long *result); }; @@ -224,6 +364,17 @@ struct hl_ctx_mgr { }; + + +/** + * struct hl_cs_job - command submission job. + * @finish_work: workqueue object to run when job is completed. + * @id: the id of this job inside a CS. + */ +struct hl_cs_job { + struct work_struct finish_work; + u32 id; +}; /* * FILE PRIVATE STRUCTURE */ @@ -298,7 +449,11 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); * @dev: related kernel basic device structure. * @asic_name: ASIC specific name. * @asic_type: ASIC specific type. + * @completion_queue: array of hl_cq. + * @cq_wq: work queue of completion queues for executing work in process context. + * @eq_wq: work queue of event queue for executing work in process context. * @kernel_ctx: KMD context structure. + * @kernel_queues: array of hl_hw_queue. * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CBs. * @dma_pool: DMA pool for small allocations. * @cpu_accessible_dma_mem: KMD <-> ArmCP shared memory CPU address. @@ -312,6 +467,7 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); * only a single process at a time. In addition, we need a * lock here so we can flush user processes which are opening * the device while we are trying to hard reset it. + * @send_cpu_message_lock: enforces only one message in KMD <-> ArmCP queue. * @asic_prop: ASIC specific immutable properties. * @asic_funcs: ASIC specific functions. * @asic_specific: ASIC specific information to use only from ASIC files.
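Taken together, these hunks wire the new queue machinery into the core: every ASIC-specific operation is reached through hdev->asic_funcs, so common code never touches Goya registers directly. A minimal sketch of the resulting call pattern (the helper name below is hypothetical, for illustration only; the callbacks and types are the ones declared above):

static void example_kick_queue(struct hl_device *hdev, u32 qid, u32 new_pi)
{
	/* serialize against other submitters on this device */
	hdev->asic_funcs->hw_queues_lock(hdev);

	/* the ASIC back-end knows which PI register belongs to qid */
	hdev->asic_funcs->ring_doorbell(hdev, qid, new_pi);

	hdev->asic_funcs->hw_queues_unlock(hdev);
}

This is the shape hl_hw_queue_send_cb_no_cmpl() takes later in this patch: it acquires the lock through hw_queues_lock() and rings the doorbell from ext_queue_submit_bd().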
@@ -331,7 +487,10 @@ struct hl_device { struct device *dev; char asic_name[16]; enum hl_asic_type asic_type; + struct hl_cq *completion_queue; + struct workqueue_struct *cq_wq; struct hl_ctx *kernel_ctx; + struct hl_hw_queue *kernel_queues; struct hl_cb_mgr kernel_cb_mgr; struct dma_pool *dma_pool; void *cpu_accessible_dma_mem; @@ -341,6 +500,7 @@ struct hl_device { struct mutex asid_mutex; /* TODO: remove fd_open_cnt_lock for multiple process support */ struct mutex fd_open_cnt_lock; + struct mutex send_cpu_message_lock; struct asic_fixed_properties asic_prop; const struct hl_asic_funcs *asic_funcs; void *asic_specific; @@ -358,6 +518,7 @@ struct hl_device { /* Parameters for bring-up */ u8 cpu_enable; u8 reset_pcilink; + u8 cpu_queues_enable; u8 fw_loading; u8 pldm; }; @@ -400,7 +561,18 @@ int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us, u32 *val); int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr, u32 timeout_us, u32 *val); - +int hl_hw_queues_create(struct hl_device *hdev); +void hl_hw_queues_destroy(struct hl_device *hdev); +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr); +u32 hl_hw_queue_add_ptr(u32 ptr, u16 val); +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id); + +#define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1) +#define hl_pi_2_offset(pi) ((pi) & (HL_QUEUE_LENGTH - 1)) + +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id); +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q); int hl_asid_init(struct hl_device *hdev); void hl_asid_fini(struct hl_device *hdev); unsigned long hl_asid_alloc(struct hl_device *hdev); diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 59c2fd196659..93576249307b 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -169,6 +169,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, /* Parameters for bring-up - set them to defaults */ hdev->cpu_enable = 1; hdev->reset_pcilink = 0; + hdev->cpu_queues_enable = 1; hdev->fw_loading = 1; hdev->pldm = 0; @@ -176,6 +177,10 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, if (!hdev->cpu_enable) hdev->fw_loading = 0; + /* If we don't load FW, no need to initialize CPU queues */ + if (!hdev->fw_loading) + hdev->cpu_queues_enable = 0; + hdev->disabled = true; hdev->pdev = pdev; /* can be NULL in case of simulator device */ diff --git a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c new file mode 100644 index 000000000000..2ec43f36cdb8 --- /dev/null +++ b/drivers/misc/habanalabs/hw_queue.c @@ -0,0 +1,400 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +/* + * hl_queue_add_ptr - add to pi or ci and checks if it wraps around + * + * @ptr: the current pi/ci value + * @val: the amount to add + * + * Add val to ptr. It can go until twice the queue length. 
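+ *
+ * For example, with HL_QUEUE_LENGTH == 256 the mask keeps pi and ci in the
+ * range [0, 511]: an empty queue has pi == ci, while a full queue has
+ * (pi - ci) == 256 modulo 512. Masking to [0, 255] instead would make full
+ * and empty indistinguishable to queue_free_slots() below.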
+ */ +inline u32 hl_hw_queue_add_ptr(u32 ptr, u16 val) +{ + ptr += val; + ptr &= ((HL_QUEUE_LENGTH << 1) - 1); + return ptr; +} + +static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) +{ + int delta = (q->pi - q->ci); + + if (delta >= 0) + return (queue_len - delta); + else + return (abs(delta) - queue_len); +} + +/* + * ext_queue_submit_bd - Submit a buffer descriptor to an external queue + * + * @hdev: pointer to habanalabs device structure + * @q: pointer to habanalabs queue structure + * @ctl: BD's control word + * @len: BD's length + * @ptr: BD's pointer + * + * This function assumes there is enough space on the queue to submit a new + * BD to it. It initializes the next BD and calls the device specific + * function to set the pi (and doorbell) + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_queue_submit_bd(struct hl_device *hdev, struct hl_hw_queue *q, + u32 ctl, u32 len, u64 ptr) +{ + struct hl_bd *bd; + + bd = (struct hl_bd *) (uintptr_t) q->kernel_address; + bd += hl_pi_2_offset(q->pi); + bd->ctl = ctl; + bd->len = len; + bd->ptr = ptr + hdev->asic_prop.host_phys_base_address; + + q->pi = hl_queue_inc_ptr(q->pi); + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * ext_queue_sanity_checks - perform some sanity checks on external queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * @reserve_cq_entry : whether to reserve an entry in the cq + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * - Make sure we have enough space in the completion queue + * - Reserve space in the completion queue (needs to be reversed if there + * is a failure down the road before the actual submission of work). 
Only + * do this action if reserve_cq_entry is true + * + */ +static int ext_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, int num_of_entries, + bool reserve_cq_entry) +{ + atomic_t *free_slots = + &hdev->completion_queue[q->hw_queue_id].free_slots_cnt; + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, HL_QUEUE_LENGTH); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + if (reserve_cq_entry) { + /* + * Check we have enough space in the completion queue + * Add -1 to counter (decrement) unless counter was already 0 + * In that case, CQ is full so we can't submit a new CB because + * we won't get ack on its completion + * atomic_add_unless will return 0 if counter was already 0 + */ + if (atomic_add_negative(num_of_entries * -1, free_slots)) { + dev_dbg(hdev->dev, "No space for %d on CQ %d\n", + num_of_entries, q->hw_queue_id); + atomic_add(num_of_entries, free_slots); + return -EAGAIN; + } + } + + return 0; +} + +/* + * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: Queue's type + * @cb_size: size of CB + * @cb_ptr: pointer to CB location + * + * This function sends a single CB, that must NOT generate a completion entry + * + */ +int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, + u32 cb_size, u64 cb_ptr) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + int rc; + + /* + * The CPU queue is a synchronous queue with an effective depth of + * a single entry (although it is allocated with room for multiple + * entries). Therefore, there is a different lock, called + * send_cpu_message_lock, that serializes accesses to the CPU queue. 
+ * As a result, we don't need to lock the access to the entire H/W + * queues module when submitting a JOB to the CPU queue + */ + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hdev->disabled) { + rc = -EPERM; + goto out; + } + + rc = ext_queue_sanity_checks(hdev, q, 1, false); + if (rc) + goto out; + + ext_queue_submit_bd(hdev, q, 0, cb_size, cb_ptr); + +out: + if (q->queue_type != QUEUE_TYPE_CPU) + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + +/* + * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue + * + * @hdev: pointer to hl_device structure + * @hw_queue_id: which queue to increment its ci + */ +void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id) +{ + struct hl_hw_queue *q = &hdev->kernel_queues[hw_queue_id]; + + q->ci = hl_queue_inc_ptr(q->ci); +} + +static int ext_and_cpu_hw_queue_init(struct hl_device *hdev, + struct hl_hw_queue *q) +{ + void *p; + int rc; + + p = hdev->asic_funcs->dma_alloc_coherent(hdev, + HL_QUEUE_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->kernel_address = (u64) (uintptr_t) p; + + q->shadow_queue = kmalloc_array(HL_QUEUE_LENGTH, + sizeof(*q->shadow_queue), + GFP_KERNEL); + if (!q->shadow_queue) { + dev_err(hdev->dev, + "Failed to allocate shadow queue for H/W queue %d\n", + q->hw_queue_id); + rc = -ENOMEM; + goto free_queue; + } + + /* Make sure read/write pointers are initialized to start of queue */ + q->ci = 0; + q->pi = 0; + + return 0; + +free_queue: + hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); + + return rc; +} + +static int int_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + void *p; + + p = hdev->asic_funcs->get_int_queue_base(hdev, q->hw_queue_id, + &q->bus_address, &q->int_queue_len); + if (!p) { + dev_err(hdev->dev, + "Failed to get base address for internal queue %d\n", + q->hw_queue_id); + return -EFAULT; + } + + q->kernel_address = (u64) (uintptr_t) p; + q->pi = 0; + q->ci = 0; + + return 0; +} + +static int cpu_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_hw_queue_init(hdev, q); +} + +static int ext_hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q) +{ + return ext_and_cpu_hw_queue_init(hdev, q); +} + +/* + * hw_queue_init - main initialization function for H/W queue object + * + * @hdev: pointer to hl_device device structure + * @q: pointer to hl_hw_queue queue structure + * @hw_queue_id: The id of the H/W queue + * + * Allocate dma-able memory for the queue and initialize fields + * Returns 0 on success + */ +static int hw_queue_init(struct hl_device *hdev, struct hl_hw_queue *q, + u32 hw_queue_id) +{ + int rc; + + BUILD_BUG_ON(HL_QUEUE_SIZE_IN_BYTES > HL_PAGE_SIZE); + + q->hw_queue_id = hw_queue_id; + + switch (q->queue_type) { + case QUEUE_TYPE_EXT: + rc = ext_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_INT: + rc = int_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_CPU: + rc = cpu_hw_queue_init(hdev, q); + break; + + case QUEUE_TYPE_NA: + q->valid = 0; + return 0; + + default: + dev_crit(hdev->dev, "wrong queue type %d during init\n", + q->queue_type); + rc = -EINVAL; + break; + } + + if (rc) + return rc; + + q->valid = 1; + + return 0; +} + +/* + * hw_queue_fini - destroy queue + * + * @hdev: pointer to hl_device device structure + * @q: pointer to hl_hw_queue queue structure + * + * Free the queue memory + */ +static void 
hw_queue_fini(struct hl_device *hdev, struct hl_hw_queue *q) +{ + if (!q->valid) + return; + + /* + * If we arrived here, there are no jobs waiting on this queue + * so we can safely remove it. + * This is because this function can only be called when: + * 1. Either a context is deleted, which can only occur if all its + * jobs were finished + * 2. A context wasn't able to be created due to failure or timeout, + * which means there are no jobs on the queue yet + + * + * The only exceptions are the queues of the kernel context, but + * if they are being destroyed, it means that the entire module is + * being removed. If the module is removed, it means there is no open + * user context. It also means that if a job was submitted by + * the kernel driver (e.g. context creation), the job itself was + * released by the kernel driver when a timeout occurred on its + * completion. Thus, we don't need to release it again. + */ + + if (q->queue_type == QUEUE_TYPE_INT) + return; + + kfree(q->shadow_queue); + + hdev->asic_funcs->dma_free_coherent(hdev, HL_QUEUE_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); +} + +int hl_hw_queues_create(struct hl_device *hdev) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hl_hw_queue *q; + int i, rc, q_ready_cnt; + + hdev->kernel_queues = kcalloc(HL_MAX_QUEUES, + sizeof(*hdev->kernel_queues), GFP_KERNEL); + + if (!hdev->kernel_queues) { + dev_err(hdev->dev, "Not enough memory for H/W queues\n"); + return -ENOMEM; + } + + /* Initialize the H/W queues */ + for (i = 0, q_ready_cnt = 0, q = hdev->kernel_queues; + i < HL_MAX_QUEUES ; i++, q_ready_cnt++, q++) { + + q->queue_type = asic->hw_queues_props[i].type; + rc = hw_queue_init(hdev, q, i); + if (rc) { + dev_err(hdev->dev, + "failed to initialize queue %d\n", i); + goto release_queues; + } + } + + return 0; + +release_queues: + for (i = 0, q = hdev->kernel_queues ; i < q_ready_cnt ; i++, q++) + hw_queue_fini(hdev, q); + + kfree(hdev->kernel_queues); + + return rc; +} + +void hl_hw_queues_destroy(struct hl_device *hdev) +{ + struct hl_hw_queue *q; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) + hw_queue_fini(hdev, q); + + kfree(hdev->kernel_queues); +} + +void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset) +{ + struct hl_hw_queue *q; + int i; + + for (i = 0, q = hdev->kernel_queues ; i < HL_MAX_QUEUES ; i++, q++) { + if ((!q->valid) || + ((!hard_reset) && (q->queue_type == QUEUE_TYPE_CPU))) + continue; + q->pi = q->ci = 0; + } +} diff --git a/drivers/misc/habanalabs/include/armcp_if.h b/drivers/misc/habanalabs/include/armcp_if.h index 85fc2efe144b..cc37003aa6b7 100644 --- a/drivers/misc/habanalabs/include/armcp_if.h +++ b/drivers/misc/habanalabs/include/armcp_if.h @@ -10,10 +10,302 @@ #include +enum pq_init_status { + PQ_INIT_STATUS_NA = 0, + PQ_INIT_STATUS_READY_FOR_CP, + PQ_INIT_STATUS_READY_FOR_HOST +}; + +/* + * ArmCP Primary Queue Packets + * + * During normal operation, KMD needs to send various messages to ArmCP, + * usually either to SET some value into a H/W periphery or to GET the current + * value of some H/W periphery. For example, SET the frequency of MME/TPC and + * GET the value of the thermal sensor. + * + * These messages can be initiated either by the User application or by KMD + * itself, e.g. power management code.
In either case, the communication from + * KMD to ArmCP will *always* be in synchronous mode, meaning that KMD will + * send a single message and poll until the message has been acknowledged and + * the results are ready (if results are needed). + * + * This means that only a single message can be sent at a time and KMD must + * wait for its result before sending the next message. Having said that, + * because these are control messages which are sent at a relatively low + * frequency, this limitation seems acceptable. It's important to note that + * in case of multiple devices, messages to different devices *can* be sent + * at the same time. + * + * The message, inputs/outputs (if relevant) and fence object will be located + * on the device DDR at an address that will be determined by KMD. During + * device initialization phase, KMD will pass that address to ArmCP. Most of + * the message types will contain inputs/outputs inside the message itself. + * The common part of each message will contain the opcode of the message (its + * type) and a field representing a fence object. + * + * When KMD wishes to send a message to ArmCP, it will write the message + * contents to the device DDR, clear the fence object and then write the + * value 484 to the mmGIC_DISTRIBUTOR__5_GICD_SETSPI_NSR register to issue + * the 484 interrupt-id to the ARM core. + * + * Upon receiving the 484 interrupt-id, ArmCP will read the message from the + * DDR. In case the message is a SET operation, ArmCP will first perform the + * operation and then write to the fence object on the device DDR. In case the + * message is a GET operation, ArmCP will first fill the results section on the + * device DDR and then write to the fence object. If an error occurred, ArmCP + * will fill the rc field with the right error code. + * + * In the meantime, KMD will poll on the fence object. Once KMD sees that the + * fence object is signaled, it will read the results from the device DDR + * (if relevant) and resume the code execution in KMD. + * + * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8 + * so the value being put by the KMD matches the value read by ArmCP. + * + * Non-QMAN packets should be limited to values 1 through (2^8 - 1). + * + * Detailed description: + * + * ARMCP_PACKET_DISABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU must NOT issue PCI + * transactions (read/write) towards the Host CPU. This also includes + * sending MSI-X interrupts. + * This packet is usually sent before the device is moved to D3Hot state. + * + * ARMCP_PACKET_ENABLE_PCI_ACCESS - + * After receiving this packet the embedded CPU is allowed to issue PCI + * transactions towards the Host CPU, including sending MSI-X interrupts. + * This packet is usually sent after the device is moved to D0 state. + * + * ARMCP_PACKET_TEMPERATURE_GET - + * Fetch the current temperature / Max / Max Hyst / Critical / + * Critical Hyst of a specified thermal sensor. The packet's + * arguments specify the desired sensor and the field to get. + * + * ARMCP_PACKET_VOLTAGE_GET - + * Fetch the voltage / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_CURRENT_GET - + * Fetch the current / Max / Min of a specified sensor. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_FAN_SPEED_GET - + * Fetch the speed / Max / Min of a specified fan. The packet's + * arguments specify the sensor and type.
+ * + * ARMCP_PACKET_PWM_GET - + * Fetch the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor and type. + * + * ARMCP_PACKET_PWM_SET - + * Set the pwm value / mode of a specified pwm. The packet's + * arguments specify the sensor, type and value. + * + * ARMCP_PACKET_FREQUENCY_SET - + * Set the frequency of a specified PLL. The packet's arguments specify + * the PLL and the desired frequency. The actual frequency in the device + * might differ from the requested frequency. + * + * ARMCP_PACKET_FREQUENCY_GET - + * Fetch the frequency of a specified PLL. The packet's arguments specify + * the PLL. + * + * ARMCP_PACKET_LED_SET - + * Set the state of a specified led. The packet's arguments + * specify the led and the desired state. + * + * ARMCP_PACKET_I2C_WR - + * Write 32-bit value to I2C device. The packet's arguments specify the + * I2C bus, address and value. + * + * ARMCP_PACKET_I2C_RD - + * Read 32-bit value from I2C device. The packet's arguments specify the + * I2C bus and address. + * + * ARMCP_PACKET_INFO_GET - + * Fetch information from the device as specified in the packet's + * structure. KMD passes the max size it allows the ArmCP to write to + * the structure, to prevent data corruption in case of mismatched + * KMD/FW versions. + * + * ARMCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed + * + * ARMCP_PACKET_UNMASK_RAZWI_IRQ - + * Unmask the given IRQ. The IRQ number is specified in the value field. + * The packet is sent after receiving an interrupt and printing its + * relevant information. + * + * ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY - + * Unmask the given IRQs. The IRQs numbers are specified in an array right + * after the armcp_packet structure, where its first element is the array + * length. The packet is sent after a soft reset was done in order to + * handle any interrupts that were sent during the reset process. + * + * ARMCP_PACKET_TEST - + * Test packet for ArmCP connectivity. The CPU will put the fence value + * in the result field. + * + * ARMCP_PACKET_FREQUENCY_CURR_GET - + * Fetch the current frequency of a specified PLL. The packet's arguments + * specify the PLL. + * + * ARMCP_PACKET_MAX_POWER_GET - + * Fetch the maximal power of the device. + * + * ARMCP_PACKET_MAX_POWER_SET - + * Set the maximal power of the device. The packet's arguments specify + * the power. + * + * ARMCP_PACKET_EEPROM_DATA_GET - + * Get EEPROM data from the ArmCP kernel. The buffer is specified in the + * addr field. The CPU will put the returned data size in the result + * field. In addition, KMD passes the max size it allows the ArmCP to + * write to the structure, to prevent data corruption in case of + * mismatched KMD/FW versions. 
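+ *
+ * For illustration, the TEST exchange implemented by goya_test_cpu_queue()
+ * and goya_send_cpu_message() in this series looks roughly as follows on
+ * the KMD side (a sketch; the real code adds locking, a timeout and error
+ * handling):
+ *
+ *	pkt.ctl = ARMCP_PACKET_TEST << ARMCP_PKT_CTL_OPCODE_SHIFT;
+ *	pkt.value = ARMCP_PACKET_FENCE_VAL;
+ *
+ * KMD sends the packet, polls pkt.fence until it reads
+ * ARMCP_PACKET_FENCE_VAL, and then extracts the firmware return code with
+ *
+ *	rc = (pkt.ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;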
+ * + */ + +enum armcp_packet_id { + ARMCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */ + ARMCP_PACKET_ENABLE_PCI_ACCESS, /* internal */ + ARMCP_PACKET_TEMPERATURE_GET, /* sysfs */ + ARMCP_PACKET_VOLTAGE_GET, /* sysfs */ + ARMCP_PACKET_CURRENT_GET, /* sysfs */ + ARMCP_PACKET_FAN_SPEED_GET, /* sysfs */ + ARMCP_PACKET_PWM_GET, /* sysfs */ + ARMCP_PACKET_PWM_SET, /* sysfs */ + ARMCP_PACKET_FREQUENCY_SET, /* sysfs */ + ARMCP_PACKET_FREQUENCY_GET, /* sysfs */ + ARMCP_PACKET_LED_SET, /* debugfs */ + ARMCP_PACKET_I2C_WR, /* debugfs */ + ARMCP_PACKET_I2C_RD, /* debugfs */ + ARMCP_PACKET_INFO_GET, /* IOCTL */ + ARMCP_PACKET_FLASH_PROGRAM_REMOVED, + ARMCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */ + ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */ + ARMCP_PACKET_TEST, /* internal */ + ARMCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */ + ARMCP_PACKET_MAX_POWER_GET, /* sysfs */ + ARMCP_PACKET_MAX_POWER_SET, /* sysfs */ + ARMCP_PACKET_EEPROM_DATA_GET, /* sysfs */ +}; + +#define ARMCP_PACKET_FENCE_VAL 0xFE8CE7A5 + +#define ARMCP_PKT_CTL_RC_SHIFT 12 +#define ARMCP_PKT_CTL_RC_MASK 0x0000F000 + +#define ARMCP_PKT_CTL_OPCODE_SHIFT 16 +#define ARMCP_PKT_CTL_OPCODE_MASK 0x1FFF0000 + +struct armcp_packet { + union { + __le64 value; /* For SET packets */ + __le64 result; /* For GET packets */ + __le64 addr; /* For PQ */ + }; + + __le32 ctl; + + __le32 fence; /* Signal to KMD that message is completed */ + + union { + struct {/* For temperature/current/voltage/fan/pwm get/set */ + __le16 sensor_index; + __le16 type; + }; + + struct { /* For I2C read/write */ + __u8 i2c_bus; + __u8 i2c_addr; + __u8 i2c_reg; + __u8 pad; /* unused */ + }; + + /* For frequency get/set */ + __le32 pll_index; + + /* For led set */ + __le32 led_index; + + /* For get Armcp info/EEPROM data */ + __le32 data_max_size; + }; +}; + +struct armcp_unmask_irq_arr_packet { + struct armcp_packet armcp_pkt; + __le32 length; + __le32 irqs[0]; +}; + +enum armcp_packet_rc { + armcp_packet_success, + armcp_packet_invalid, + armcp_packet_fault +}; + +enum armcp_temp_type { + armcp_temp_input, + armcp_temp_max = 6, + armcp_temp_max_hyst, + armcp_temp_crit, + armcp_temp_crit_hyst +}; + +enum armcp_in_attributes { + armcp_in_input, + armcp_in_min, + armcp_in_max +}; + +enum armcp_curr_attributes { + armcp_curr_input, + armcp_curr_min, + armcp_curr_max +}; + +enum armcp_fan_attributes { + armcp_fan_input, + armcp_fan_min = 2, + armcp_fan_max +}; + +enum armcp_pwm_attributes { + armcp_pwm_input, + armcp_pwm_enable +}; + +/* Event Queue Packets */ + +struct eq_generic_event { + __le64 data[7]; +}; + /* * ArmCP info */ #define VERSION_MAX_LEN 128 +#define ARMCP_MAX_SENSORS 128 + +struct armcp_sensor { + __le32 type; + __le32 flags; +}; + +struct armcp_info { + struct armcp_sensor sensors[ARMCP_MAX_SENSORS]; + __u8 kernel_version[VERSION_MAX_LEN]; + __le32 reserved[3]; + __le32 cpld_version; + __le32 infineon_version; + __u8 fuse_version[VERSION_MAX_LEN]; + __u8 thermal_version[VERSION_MAX_LEN]; + __u8 armcp_version[VERSION_MAX_LEN]; + __le64 dram_size; +}; #endif /* ARMCP_IF_H */ diff --git a/drivers/misc/habanalabs/include/goya/goya_async_events.h b/drivers/misc/habanalabs/include/goya/goya_async_events.h new file mode 100644 index 000000000000..497937a17ee9 --- /dev/null +++ b/drivers/misc/habanalabs/include/goya/goya_async_events.h @@ -0,0 +1,186 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2018 HabanaLabs, Ltd. + * All Rights Reserved. 
+ * + */ + +#ifndef __GOYA_ASYNC_EVENTS_H_ +#define __GOYA_ASYNC_EVENTS_H_ + +enum goya_async_event_id { + GOYA_ASYNC_EVENT_ID_PCIE_IF = 33, + GOYA_ASYNC_EVENT_ID_TPC0_ECC = 36, + GOYA_ASYNC_EVENT_ID_TPC1_ECC = 39, + GOYA_ASYNC_EVENT_ID_TPC2_ECC = 42, + GOYA_ASYNC_EVENT_ID_TPC3_ECC = 45, + GOYA_ASYNC_EVENT_ID_TPC4_ECC = 48, + GOYA_ASYNC_EVENT_ID_TPC5_ECC = 51, + GOYA_ASYNC_EVENT_ID_TPC6_ECC = 54, + GOYA_ASYNC_EVENT_ID_TPC7_ECC = 57, + GOYA_ASYNC_EVENT_ID_MME_ECC = 60, + GOYA_ASYNC_EVENT_ID_MME_ECC_EXT = 61, + GOYA_ASYNC_EVENT_ID_MMU_ECC = 63, + GOYA_ASYNC_EVENT_ID_DMA_MACRO = 64, + GOYA_ASYNC_EVENT_ID_DMA_ECC = 66, + GOYA_ASYNC_EVENT_ID_CPU_IF_ECC = 75, + GOYA_ASYNC_EVENT_ID_PSOC_MEM = 78, + GOYA_ASYNC_EVENT_ID_PSOC_CORESIGHT = 79, + GOYA_ASYNC_EVENT_ID_SRAM0 = 81, + GOYA_ASYNC_EVENT_ID_SRAM1 = 82, + GOYA_ASYNC_EVENT_ID_SRAM2 = 83, + GOYA_ASYNC_EVENT_ID_SRAM3 = 84, + GOYA_ASYNC_EVENT_ID_SRAM4 = 85, + GOYA_ASYNC_EVENT_ID_SRAM5 = 86, + GOYA_ASYNC_EVENT_ID_SRAM6 = 87, + GOYA_ASYNC_EVENT_ID_SRAM7 = 88, + GOYA_ASYNC_EVENT_ID_SRAM8 = 89, + GOYA_ASYNC_EVENT_ID_SRAM9 = 90, + GOYA_ASYNC_EVENT_ID_SRAM10 = 91, + GOYA_ASYNC_EVENT_ID_SRAM11 = 92, + GOYA_ASYNC_EVENT_ID_SRAM12 = 93, + GOYA_ASYNC_EVENT_ID_SRAM13 = 94, + GOYA_ASYNC_EVENT_ID_SRAM14 = 95, + GOYA_ASYNC_EVENT_ID_SRAM15 = 96, + GOYA_ASYNC_EVENT_ID_SRAM16 = 97, + GOYA_ASYNC_EVENT_ID_SRAM17 = 98, + GOYA_ASYNC_EVENT_ID_SRAM18 = 99, + GOYA_ASYNC_EVENT_ID_SRAM19 = 100, + GOYA_ASYNC_EVENT_ID_SRAM20 = 101, + GOYA_ASYNC_EVENT_ID_SRAM21 = 102, + GOYA_ASYNC_EVENT_ID_SRAM22 = 103, + GOYA_ASYNC_EVENT_ID_SRAM23 = 104, + GOYA_ASYNC_EVENT_ID_SRAM24 = 105, + GOYA_ASYNC_EVENT_ID_SRAM25 = 106, + GOYA_ASYNC_EVENT_ID_SRAM26 = 107, + GOYA_ASYNC_EVENT_ID_SRAM27 = 108, + GOYA_ASYNC_EVENT_ID_SRAM28 = 109, + GOYA_ASYNC_EVENT_ID_SRAM29 = 110, + GOYA_ASYNC_EVENT_ID_GIC500 = 112, + GOYA_ASYNC_EVENT_ID_PCIE_DEC = 115, + GOYA_ASYNC_EVENT_ID_TPC0_DEC = 117, + GOYA_ASYNC_EVENT_ID_TPC1_DEC = 120, + GOYA_ASYNC_EVENT_ID_TPC2_DEC = 123, + GOYA_ASYNC_EVENT_ID_TPC3_DEC = 126, + GOYA_ASYNC_EVENT_ID_TPC4_DEC = 129, + GOYA_ASYNC_EVENT_ID_TPC5_DEC = 132, + GOYA_ASYNC_EVENT_ID_TPC6_DEC = 135, + GOYA_ASYNC_EVENT_ID_TPC7_DEC = 138, + GOYA_ASYNC_EVENT_ID_AXI_ECC = 139, + GOYA_ASYNC_EVENT_ID_L2_RAM_ECC = 140, + GOYA_ASYNC_EVENT_ID_MME_WACS = 141, + GOYA_ASYNC_EVENT_ID_MME_WACSD = 142, + GOYA_ASYNC_EVENT_ID_PLL0 = 143, + GOYA_ASYNC_EVENT_ID_PLL1 = 144, + GOYA_ASYNC_EVENT_ID_PLL3 = 146, + GOYA_ASYNC_EVENT_ID_PLL4 = 147, + GOYA_ASYNC_EVENT_ID_PLL5 = 148, + GOYA_ASYNC_EVENT_ID_PLL6 = 149, + GOYA_ASYNC_EVENT_ID_CPU_AXI_SPLITTER = 155, + GOYA_ASYNC_EVENT_ID_PSOC_AXI_DEC = 159, + GOYA_ASYNC_EVENT_ID_PSOC = 160, + GOYA_ASYNC_EVENT_ID_PCIE_FLR = 171, + GOYA_ASYNC_EVENT_ID_PCIE_HOT_RESET = 172, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG0 = 174, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG1 = 175, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG2 = 176, + GOYA_ASYNC_EVENT_ID_PCIE_QID0_ENG3 = 177, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG0 = 178, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG1 = 179, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG2 = 180, + GOYA_ASYNC_EVENT_ID_PCIE_QID1_ENG3 = 181, + GOYA_ASYNC_EVENT_ID_PCIE_APB = 182, + GOYA_ASYNC_EVENT_ID_PCIE_QDB = 183, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_P_WR = 184, + GOYA_ASYNC_EVENT_ID_PCIE_BM_D_RD = 185, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_P_WR = 186, + GOYA_ASYNC_EVENT_ID_PCIE_BM_U_RD = 187, + GOYA_ASYNC_EVENT_ID_TPC0_BMON_SPMU = 190, + GOYA_ASYNC_EVENT_ID_TPC0_KRN_ERR = 191, + GOYA_ASYNC_EVENT_ID_TPC1_BMON_SPMU = 200, + GOYA_ASYNC_EVENT_ID_TPC1_KRN_ERR = 201, + GOYA_ASYNC_EVENT_ID_TPC2_BMON_SPMU = 210, + 
GOYA_ASYNC_EVENT_ID_TPC2_KRN_ERR = 211, + GOYA_ASYNC_EVENT_ID_TPC3_BMON_SPMU = 220, + GOYA_ASYNC_EVENT_ID_TPC3_KRN_ERR = 221, + GOYA_ASYNC_EVENT_ID_TPC4_BMON_SPMU = 230, + GOYA_ASYNC_EVENT_ID_TPC4_KRN_ERR = 231, + GOYA_ASYNC_EVENT_ID_TPC5_BMON_SPMU = 240, + GOYA_ASYNC_EVENT_ID_TPC5_KRN_ERR = 241, + GOYA_ASYNC_EVENT_ID_TPC6_BMON_SPMU = 250, + GOYA_ASYNC_EVENT_ID_TPC6_KRN_ERR = 251, + GOYA_ASYNC_EVENT_ID_TPC7_BMON_SPMU = 260, + GOYA_ASYNC_EVENT_ID_TPC7_KRN_ERR = 261, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU0 = 270, + GOYA_ASYNC_EVENT_ID_MMU_SBA_SPMU1 = 271, + GOYA_ASYNC_EVENT_ID_MME_WACS_UP = 272, + GOYA_ASYNC_EVENT_ID_MME_WACS_DOWN = 273, + GOYA_ASYNC_EVENT_ID_MMU_PAGE_FAULT = 280, + GOYA_ASYNC_EVENT_ID_MMU_WR_PERM = 281, + GOYA_ASYNC_EVENT_ID_MMU_DBG_BM = 282, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH0 = 290, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH1 = 291, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH2 = 292, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH3 = 293, + GOYA_ASYNC_EVENT_ID_DMA_BM_CH4 = 294, + GOYA_ASYNC_EVENT_ID_DDR0_PHY_DFI = 300, + GOYA_ASYNC_EVENT_ID_DDR0_ECC_SCRUB = 301, + GOYA_ASYNC_EVENT_ID_DDR0_DB_ECC = 302, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC = 303, + GOYA_ASYNC_EVENT_ID_DDR0_SB_ECC_MC = 304, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_RD = 305, + GOYA_ASYNC_EVENT_ID_DDR0_AXI_WR = 306, + GOYA_ASYNC_EVENT_ID_DDR1_PHY_DFI = 310, + GOYA_ASYNC_EVENT_ID_DDR1_ECC_SCRUB = 311, + GOYA_ASYNC_EVENT_ID_DDR1_DB_ECC = 312, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC = 313, + GOYA_ASYNC_EVENT_ID_DDR1_SB_ECC_MC = 314, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_RD = 315, + GOYA_ASYNC_EVENT_ID_DDR1_AXI_WR = 316, + GOYA_ASYNC_EVENT_ID_CPU_BMON = 320, + GOYA_ASYNC_EVENT_ID_TS_EAST = 322, + GOYA_ASYNC_EVENT_ID_TS_WEST = 323, + GOYA_ASYNC_EVENT_ID_TS_NORTH = 324, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_0 = 330, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_1 = 331, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_U16_2 = 332, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_05_SW_RESET = 356, + GOYA_ASYNC_EVENT_ID_PSOC_GPIO_10_VRHOT_ICRIT = 361, + GOYA_ASYNC_EVENT_ID_TPC0_CMDQ = 430, + GOYA_ASYNC_EVENT_ID_TPC1_CMDQ = 431, + GOYA_ASYNC_EVENT_ID_TPC2_CMDQ = 432, + GOYA_ASYNC_EVENT_ID_TPC3_CMDQ = 433, + GOYA_ASYNC_EVENT_ID_TPC4_CMDQ = 434, + GOYA_ASYNC_EVENT_ID_TPC5_CMDQ = 435, + GOYA_ASYNC_EVENT_ID_TPC6_CMDQ = 436, + GOYA_ASYNC_EVENT_ID_TPC7_CMDQ = 437, + GOYA_ASYNC_EVENT_ID_TPC0_QM = 438, + GOYA_ASYNC_EVENT_ID_TPC1_QM = 439, + GOYA_ASYNC_EVENT_ID_TPC2_QM = 440, + GOYA_ASYNC_EVENT_ID_TPC3_QM = 441, + GOYA_ASYNC_EVENT_ID_TPC4_QM = 442, + GOYA_ASYNC_EVENT_ID_TPC5_QM = 443, + GOYA_ASYNC_EVENT_ID_TPC6_QM = 444, + GOYA_ASYNC_EVENT_ID_TPC7_QM = 445, + GOYA_ASYNC_EVENT_ID_MME_QM = 447, + GOYA_ASYNC_EVENT_ID_MME_CMDQ = 448, + GOYA_ASYNC_EVENT_ID_DMA0_QM = 449, + GOYA_ASYNC_EVENT_ID_DMA1_QM = 450, + GOYA_ASYNC_EVENT_ID_DMA2_QM = 451, + GOYA_ASYNC_EVENT_ID_DMA3_QM = 452, + GOYA_ASYNC_EVENT_ID_DMA4_QM = 453, + GOYA_ASYNC_EVENT_ID_DMA_ON_HBW = 454, + GOYA_ASYNC_EVENT_ID_DMA0_CH = 455, + GOYA_ASYNC_EVENT_ID_DMA1_CH = 456, + GOYA_ASYNC_EVENT_ID_DMA2_CH = 457, + GOYA_ASYNC_EVENT_ID_DMA3_CH = 458, + GOYA_ASYNC_EVENT_ID_DMA4_CH = 459, + GOYA_ASYNC_EVENT_ID_PI_UPDATE = 484, + GOYA_ASYNC_EVENT_ID_HALT_MACHINE = 485, + GOYA_ASYNC_EVENT_ID_INTS_REGISTER = 486, + GOYA_ASYNC_EVENT_ID_SOFT_RESET = 487, + GOYA_ASYNC_EVENT_ID_LAST_VALID_ID = 1023, + GOYA_ASYNC_EVENT_ID_SIZE +}; + +#endif /* __GOYA_ASYNC_EVENTS_H_ */ diff --git a/drivers/misc/habanalabs/include/goya/goya_packets.h b/drivers/misc/habanalabs/include/goya/goya_packets.h new file mode 100644 index 000000000000..a14407b975e4 --- /dev/null +++ 
b/drivers/misc/habanalabs/include/goya/goya_packets.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2017-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef GOYA_PACKETS_H +#define GOYA_PACKETS_H + +#include + +#define PACKET_HEADER_PACKET_ID_SHIFT 56 +#define PACKET_HEADER_PACKET_ID_MASK 0x1F00000000000000ull + +enum packet_id { + PACKET_WREG_32 = 0x1, + PACKET_WREG_BULK = 0x2, + PACKET_MSG_LONG = 0x3, + PACKET_MSG_SHORT = 0x4, + PACKET_CP_DMA = 0x5, + PACKET_MSG_PROT = 0x7, + PACKET_FENCE = 0x8, + PACKET_LIN_DMA = 0x9, + PACKET_NOP = 0xA, + PACKET_STOP = 0xB, + MAX_PACKET_ID = (PACKET_HEADER_PACKET_ID_MASK >> + PACKET_HEADER_PACKET_ID_SHIFT) + 1 +}; + +enum goya_dma_direction { + DMA_HOST_TO_DRAM, + DMA_HOST_TO_SRAM, + DMA_DRAM_TO_SRAM, + DMA_SRAM_TO_DRAM, + DMA_SRAM_TO_HOST, + DMA_DRAM_TO_HOST, + DMA_DRAM_TO_DRAM, + DMA_SRAM_TO_SRAM, + DMA_ENUM_MAX +}; + +#define GOYA_PKT_CTL_OPCODE_SHIFT 24 +#define GOYA_PKT_CTL_OPCODE_MASK 0x1F000000 + +#define GOYA_PKT_CTL_EB_SHIFT 29 +#define GOYA_PKT_CTL_EB_MASK 0x20000000 + +#define GOYA_PKT_CTL_RB_SHIFT 30 +#define GOYA_PKT_CTL_RB_MASK 0x40000000 + +#define GOYA_PKT_CTL_MB_SHIFT 31 +#define GOYA_PKT_CTL_MB_MASK 0x80000000 + +struct packet_nop { + __le32 reserved; + __le32 ctl; +}; + +struct packet_stop { + __le32 reserved; + __le32 ctl; +}; + +#define GOYA_PKT_WREG32_CTL_REG_OFFSET_SHIFT 0 +#define GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK 0x0000FFFF + +struct packet_wreg32 { + __le32 value; + __le32 ctl; +}; + +struct packet_wreg_bulk { + __le32 size64; + __le32 ctl; + __le64 values[0]; /* data starts here */ +}; + +struct packet_msg_long { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +struct packet_msg_short { + __le32 value; + __le32 ctl; +}; + +struct packet_msg_prot { + __le32 value; + __le32 ctl; + __le64 addr; +}; + +struct packet_fence { + __le32 cfg; + __le32 ctl; +}; + +#define GOYA_PKT_LIN_DMA_CTL_WO_SHIFT 0 +#define GOYA_PKT_LIN_DMA_CTL_WO_MASK 0x00000001 + +#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_SHIFT 1 +#define GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK 0x00000002 + +#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_SHIFT 2 +#define GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK 0x00000004 + +#define GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT 6 +#define GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK 0x00000040 + +#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT 20 +#define GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK 0x00700000 + +struct packet_lin_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; + __le64 dst_addr; +}; + +struct packet_cp_dma { + __le32 tsize; + __le32 ctl; + __le64 src_addr; +}; + +#endif /* GOYA_PACKETS_H */ diff --git a/drivers/misc/habanalabs/include/qman_if.h b/drivers/misc/habanalabs/include/qman_if.h new file mode 100644 index 000000000000..bf59bbe27fdc --- /dev/null +++ b/drivers/misc/habanalabs/include/qman_if.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef QMAN_IF_H +#define QMAN_IF_H + +#include + +/* + * PRIMARY QUEUE + */ + +struct hl_bd { + __le64 ptr; + __le32 len; + __le32 ctl; +}; + +#define HL_BD_SIZE sizeof(struct hl_bd) + +/* + * BD_CTL_REPEAT_VALID tells the CP whether the repeat field in the BD CTL is + * valid. 1 means the repeat field is valid, 0 means not-valid, + * i.e. 
repeat == 1 + */ +#define BD_CTL_REPEAT_VALID_SHIFT 24 +#define BD_CTL_REPEAT_VALID_MASK 0x01000000 + +#define BD_CTL_SHADOW_INDEX_SHIFT 0 +#define BD_CTL_SHADOW_INDEX_MASK 0x00000FFF + +/* + * COMPLETION QUEUE + */ + +struct hl_cq_entry { + __le32 data; +}; + +#define HL_CQ_ENTRY_SIZE sizeof(struct hl_cq_entry) + +#define CQ_ENTRY_READY_SHIFT 31 +#define CQ_ENTRY_READY_MASK 0x80000000 + +#define CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT 30 +#define CQ_ENTRY_SHADOW_INDEX_VALID_MASK 0x40000000 + +#define CQ_ENTRY_SHADOW_INDEX_SHIFT BD_CTL_SHADOW_INDEX_SHIFT +#define CQ_ENTRY_SHADOW_INDEX_MASK BD_CTL_SHADOW_INDEX_MASK + + +#endif /* QMAN_IF_H */ diff --git a/drivers/misc/habanalabs/irq.c b/drivers/misc/habanalabs/irq.c new file mode 100644 index 000000000000..6b7d35f6af08 --- /dev/null +++ b/drivers/misc/habanalabs/irq.c @@ -0,0 +1,149 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include "habanalabs.h" + +#include + +/* + * hl_cq_inc_ptr - increment ci or pi of cq + * + * @ptr: the current ci or pi value of the completion queue + * + * Increment ptr by 1. If it reaches the number of completion queue + * entries, set it to 0 + */ +inline u32 hl_cq_inc_ptr(u32 ptr) +{ + ptr++; + if (unlikely(ptr == HL_CQ_LENGTH)) + ptr = 0; + return ptr; +} + +/* + * hl_irq_handler_cq - irq handler for completion queue + * + * @irq: irq number + * @arg: pointer to completion queue structure + * + */ +irqreturn_t hl_irq_handler_cq(int irq, void *arg) +{ + struct hl_cq *cq = arg; + struct hl_device *hdev = cq->hdev; + struct hl_hw_queue *queue; + struct hl_cs_job *job; + bool shadow_index_valid; + u16 shadow_index; + u32 *cq_entry; + u32 *cq_base; + + if (hdev->disabled) { + dev_dbg(hdev->dev, + "Device disabled but received IRQ %d for CQ %d\n", + irq, cq->hw_queue_id); + return IRQ_HANDLED; + } + + cq_base = (u32 *) (uintptr_t) cq->kernel_address; + + while (1) { + bool entry_ready = ((cq_base[cq->ci] & CQ_ENTRY_READY_MASK) + >> CQ_ENTRY_READY_SHIFT); + + if (!entry_ready) + break; + + cq_entry = (u32 *) &cq_base[cq->ci]; + + /* + * Make sure we read CQ entry contents after we've + * checked the ownership bit. + */ + dma_rmb(); + + shadow_index_valid = + ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_VALID_MASK) + >> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT); + + shadow_index = (u16) + ((*cq_entry & CQ_ENTRY_SHADOW_INDEX_MASK) + >> CQ_ENTRY_SHADOW_INDEX_SHIFT); + + queue = &hdev->kernel_queues[cq->hw_queue_id]; + + if ((shadow_index_valid) && (!hdev->disabled)) { + job = queue->shadow_queue[hl_pi_2_offset(shadow_index)]; + queue_work(hdev->cq_wq, &job->finish_work); + } + + /* + * Update ci of the context's queue. 
There is no + * need to protect it with spinlock because this update is + * done only inside IRQ and there is a different IRQ per + * queue + */ + queue->ci = hl_queue_inc_ptr(queue->ci); + + /* Clear CQ entry ready bit */ + cq_base[cq->ci] &= ~CQ_ENTRY_READY_MASK; + + cq->ci = hl_cq_inc_ptr(cq->ci); + + /* Increment free slots */ + atomic_inc(&cq->free_slots_cnt); + } + + return IRQ_HANDLED; +} + +/* + * hl_cq_init - main initialization function for an cq object + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * @hw_queue_id: The H/W queue ID this completion queue belongs to + * + * Allocate dma-able memory for the completion queue and initialize fields + * Returns 0 on success + */ +int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id) +{ + void *p; + + BUILD_BUG_ON(HL_CQ_SIZE_IN_BYTES > HL_PAGE_SIZE); + + p = hdev->asic_funcs->dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + &q->bus_address, GFP_KERNEL | __GFP_ZERO); + if (!p) + return -ENOMEM; + + q->hdev = hdev; + q->kernel_address = (u64) (uintptr_t) p; + q->hw_queue_id = hw_queue_id; + q->ci = 0; + q->pi = 0; + + atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH); + + return 0; +} + +/* + * hl_cq_fini - destroy completion queue + * + * @hdev: pointer to device structure + * @q: pointer to cq structure + * + * Free the completion queue memory + */ +void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q) +{ + hdev->asic_funcs->dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES, + (void *) (uintptr_t) q->kernel_address, q->bus_address); +} diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index a8edfd3e9c95..756266cf0416 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -17,6 +17,35 @@ */ #define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START 0x8000 /* 32KB */ +/* + * Queue Numbering + * + * The external queues (DMA channels + CPU) MUST be before the internal queues + * and each group (DMA channels + CPU and internal) must be contiguous inside + * itself but there can be a gap between the two groups (although not + * recommended) + */ + +enum goya_queue_id { + GOYA_QUEUE_ID_DMA_0 = 0, + GOYA_QUEUE_ID_DMA_1, + GOYA_QUEUE_ID_DMA_2, + GOYA_QUEUE_ID_DMA_3, + GOYA_QUEUE_ID_DMA_4, + GOYA_QUEUE_ID_CPU_PQ, + GOYA_QUEUE_ID_MME, + GOYA_QUEUE_ID_TPC0, + GOYA_QUEUE_ID_TPC1, + GOYA_QUEUE_ID_TPC2, + GOYA_QUEUE_ID_TPC3, + GOYA_QUEUE_ID_TPC4, + GOYA_QUEUE_ID_TPC5, + GOYA_QUEUE_ID_TPC6, + GOYA_QUEUE_ID_TPC7, + GOYA_QUEUE_ID_SIZE +}; + + /* Opcode to create a new command buffer */ #define HL_CB_OP_CREATE 0 /* Opcode to destroy previously created command buffer */ -- cgit v1.2.3-71-gd317 From eff6f4a0e70b7bcf4674f471a768860a74e638a6 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Sat, 16 Feb 2019 00:39:21 +0200 Subject: habanalabs: add command submission module This patch adds the main flow for the user to submit work to the device. Each work is described by a command submission object (CS). The CS contains 3 arrays of command buffers: One for execution, and two for context-switch (store and restore). For each CB, the user specifies on which queue to put that CB. In case of an internal queue, the entry doesn't contain a pointer to the CB but the address in the on-chip memory that the CB resides at. The driver parses some of the CBs to enforce security restrictions. The user receives a sequence number that represents the CS object. The user can then query the driver regarding the status of the CS, using that sequence number. 
In case the CS doesn't finish before the timeout expires, the driver will perform a soft-reset of the device. Reviewed-by: Mike Rapoport Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/Makefile | 3 +- drivers/misc/habanalabs/command_submission.c | 766 ++++++++++++++++++ drivers/misc/habanalabs/context.c | 52 +- drivers/misc/habanalabs/device.c | 16 + drivers/misc/habanalabs/goya/goya.c | 1126 ++++++++++++++++++++++++++ drivers/misc/habanalabs/habanalabs.h | 275 +++++++ drivers/misc/habanalabs/habanalabs_drv.c | 20 + drivers/misc/habanalabs/habanalabs_ioctl.c | 4 +- drivers/misc/habanalabs/hw_queue.c | 232 ++++++ drivers/misc/habanalabs/memory.c | 198 +++++ include/uapi/misc/habanalabs.h | 158 +++- 11 files changed, 2843 insertions(+), 7 deletions(-) create mode 100644 drivers/misc/habanalabs/command_submission.c create mode 100644 drivers/misc/habanalabs/memory.c (limited to 'include') diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile index b5607233d216..d2fd0e18b1eb 100644 --- a/drivers/misc/habanalabs/Makefile +++ b/drivers/misc/habanalabs/Makefile @@ -5,7 +5,8 @@ obj-m := habanalabs.o habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \ - command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o + command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \ + command_submission.o include $(src)/goya/Makefile habanalabs-y += $(HL_GOYA_FILES) diff --git a/drivers/misc/habanalabs/command_submission.c b/drivers/misc/habanalabs/command_submission.c new file mode 100644 index 000000000000..ae68b97e428d --- /dev/null +++ b/drivers/misc/habanalabs/command_submission.c @@ -0,0 +1,766 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. + */ + +#include +#include "habanalabs.h" + +#include +#include + +static void job_wq_completion(struct work_struct *work); +static long _hl_cs_wait_ioctl(struct hl_device *hdev, + struct hl_ctx *ctx, u64 timeout_us, u64 seq); +static void cs_do_release(struct kref *ref); + +static const char *hl_fence_get_driver_name(struct dma_fence *fence) +{ + return "HabanaLabs"; +} + +static const char *hl_fence_get_timeline_name(struct dma_fence *fence) +{ + struct hl_dma_fence *hl_fence = + container_of(fence, struct hl_dma_fence, base_fence); + + return dev_name(hl_fence->hdev->dev); +} + +static bool hl_fence_enable_signaling(struct dma_fence *fence) +{ + return true; +} + +static void hl_fence_release(struct dma_fence *fence) +{ + struct hl_dma_fence *hl_fence = + container_of(fence, struct hl_dma_fence, base_fence); + + kfree_rcu(hl_fence, base_fence.rcu); +} + +static const struct dma_fence_ops hl_fence_ops = { + .get_driver_name = hl_fence_get_driver_name, + .get_timeline_name = hl_fence_get_timeline_name, + .enable_signaling = hl_fence_enable_signaling, + .wait = dma_fence_default_wait, + .release = hl_fence_release +}; + +static void cs_get(struct hl_cs *cs) +{ + kref_get(&cs->refcount); +} + +static int cs_get_unless_zero(struct hl_cs *cs) +{ + return kref_get_unless_zero(&cs->refcount); +} + +static void cs_put(struct hl_cs *cs) +{ + kref_put(&cs->refcount, cs_do_release); +} + +/* + * cs_parser - parse the user command submission + * + * @hpriv : pointer to the private data of the fd + * @job : pointer to the job that holds the command submission info + * + * The function parses the command submission of the user. 
It calls the
+ * ASIC specific parser, which returns a list of memory blocks to send
+ * to the device as different command buffers
+ *
+ */
+static int cs_parser(struct hl_fpriv *hpriv, struct hl_cs_job *job)
+{
+	struct hl_device *hdev = hpriv->hdev;
+	struct hl_cs_parser parser;
+	int rc;
+
+	parser.ctx_id = job->cs->ctx->asid;
+	parser.cs_sequence = job->cs->sequence;
+	parser.job_id = job->id;
+
+	parser.hw_queue_id = job->hw_queue_id;
+	parser.job_userptr_list = &job->userptr_list;
+	parser.patched_cb = NULL;
+	parser.user_cb = job->user_cb;
+	parser.user_cb_size = job->user_cb_size;
+	parser.ext_queue = job->ext_queue;
+	job->patched_cb = NULL;
+	parser.use_virt_addr = hdev->mmu_enable;
+
+	rc = hdev->asic_funcs->cs_parser(hdev, &parser);
+	if (job->ext_queue) {
+		if (!rc) {
+			job->patched_cb = parser.patched_cb;
+			job->job_cb_size = parser.patched_cb_size;
+
+			spin_lock(&job->patched_cb->lock);
+			job->patched_cb->cs_cnt++;
+			spin_unlock(&job->patched_cb->lock);
+		}
+
+		/*
+		 * Whether the parsing worked or not, we don't need the
+		 * original CB anymore because it was already parsed and
+		 * won't be accessed again for this CS
+		 */
+		spin_lock(&job->user_cb->lock);
+		job->user_cb->cs_cnt--;
+		spin_unlock(&job->user_cb->lock);
+		hl_cb_put(job->user_cb);
+		job->user_cb = NULL;
+	}
+
+	return rc;
+}
+
+static void free_job(struct hl_device *hdev, struct hl_cs_job *job)
+{
+	struct hl_cs *cs = job->cs;
+
+	if (job->ext_queue) {
+		hl_userptr_delete_list(hdev, &job->userptr_list);
+
+		/*
+		 * We might arrive here from a rollback, in which case the
+		 * patched CB was never created, so we need to check that it
+		 * is not NULL
+		 */
+		if (job->patched_cb) {
+			spin_lock(&job->patched_cb->lock);
+			job->patched_cb->cs_cnt--;
+			spin_unlock(&job->patched_cb->lock);
+
+			hl_cb_put(job->patched_cb);
+		}
+	}
+
+	/*
+	 * This is the only place where there can be multiple threads
+	 * modifying the list at the same time
+	 */
+	spin_lock(&cs->job_lock);
+	list_del(&job->cs_node);
+	spin_unlock(&cs->job_lock);
+
+	if (job->ext_queue)
+		cs_put(cs);
+
+	kfree(job);
+}
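+
+/*
+ * CS reference lifecycle: kref_init() in allocate_cs() gives the
+ * submitter one reference and each job on an external queue takes
+ * another one (cs_get() in _hl_cs_ioctl()). External jobs drop theirs
+ * in free_job() above and the submitter drops its own at the end of
+ * _hl_cs_ioctl(), so cs_do_release() below runs exactly once, when the
+ * last of these references is put.
+ */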
+static void cs_do_release(struct kref *ref)
+{
+	struct hl_cs *cs = container_of(ref, struct hl_cs,
+						refcount);
+	struct hl_device *hdev = cs->ctx->hdev;
+	struct hl_cs_job *job, *tmp;
+
+	cs->completed = true;
+
+	/*
+	 * If we reached here, it means that all external jobs have finished,
+	 * because each one of them took a refcnt to the CS. We still need to
+	 * go over the internal jobs and free them. Otherwise, we will have
+	 * leaked memory and, what's worse, the CS object (and potentially the
+	 * CTX object) could be released, while the JOB still holds a pointer
+	 * to them (but no reference).
+	 */
+	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+		free_job(hdev, job);
+
+	/* We also need to update CI for internal queues */
+	if (cs->submitted) {
+		hl_int_hw_queue_update_ci(cs);
+
+		spin_lock(&hdev->hw_queues_mirror_lock);
+		/* remove CS from hw_queues mirror list */
+		list_del_init(&cs->mirror_node);
+		spin_unlock(&hdev->hw_queues_mirror_lock);
+
+		/*
+		 * Don't cancel the TDR in case this CS timed out, because we
+		 * might be running from the TDR context
+		 */
+		if ((!cs->timedout) &&
+			(hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT)) {
+			struct hl_cs *next;
+
+			if (cs->tdr_active)
+				cancel_delayed_work_sync(&cs->work_tdr);
+
+			spin_lock(&hdev->hw_queues_mirror_lock);
+
+			/* queue TDR for next CS */
+			next = list_first_entry_or_null(
+					&hdev->hw_queues_mirror_list,
+					struct hl_cs, mirror_node);
+
+			if ((next) && (!next->tdr_active)) {
+				next->tdr_active = true;
+				schedule_delayed_work(&next->work_tdr,
+							hdev->timeout_jiffies);
+			}
+
+			spin_unlock(&hdev->hw_queues_mirror_lock);
+		}
+	}
+
+	hl_ctx_put(cs->ctx);
+
+	if (cs->timedout)
+		dma_fence_set_error(cs->fence, -ETIMEDOUT);
+	else if (cs->aborted)
+		dma_fence_set_error(cs->fence, -EIO);
+
+	dma_fence_signal(cs->fence);
+	dma_fence_put(cs->fence);
+
+	kfree(cs);
+}
+
+static void cs_timedout(struct work_struct *work)
+{
+	struct hl_device *hdev;
+	int ctx_asid, rc;
+	struct hl_cs *cs = container_of(work, struct hl_cs,
+						work_tdr.work);
+	rc = cs_get_unless_zero(cs);
+	if (!rc)
+		return;
+
+	if ((!cs->submitted) || (cs->completed)) {
+		cs_put(cs);
+		return;
+	}
+
+	/* Mark the CS as timed out so we won't try to cancel its TDR */
+	cs->timedout = true;
+
+	hdev = cs->ctx->hdev;
+	ctx_asid = cs->ctx->asid;
+
+	/* TODO: add information about last signaled seq and last emitted seq */
+	dev_err(hdev->dev, "CS %d.%llu got stuck!\n", ctx_asid, cs->sequence);
+
+	cs_put(cs);
+
+	if (hdev->reset_on_lockup)
+		hl_device_reset(hdev, false, false);
+}
+
+static int allocate_cs(struct hl_device *hdev, struct hl_ctx *ctx,
+			struct hl_cs **cs_new)
+{
+	struct hl_dma_fence *fence;
+	struct dma_fence *other = NULL;
+	struct hl_cs *cs;
+	int rc;
+
+	cs = kzalloc(sizeof(*cs), GFP_ATOMIC);
+	if (!cs)
+		return -ENOMEM;
+
+	cs->ctx = ctx;
+	cs->submitted = false;
+	cs->completed = false;
+	INIT_LIST_HEAD(&cs->job_list);
+	INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout);
+	kref_init(&cs->refcount);
+	spin_lock_init(&cs->job_lock);
+
+	fence = kmalloc(sizeof(*fence), GFP_ATOMIC);
+	if (!fence) {
+		rc = -ENOMEM;
+		goto free_cs;
+	}
+
+	fence->hdev = hdev;
+	spin_lock_init(&fence->lock);
+	cs->fence = &fence->base_fence;
+
+	spin_lock(&ctx->cs_lock);
+
+	fence->cs_seq = ctx->cs_sequence;
+	other = ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)];
+	if ((other) && (!dma_fence_is_signaled(other))) {
+		spin_unlock(&ctx->cs_lock);
+		rc = -EAGAIN;
+		goto free_fence;
+	}
+
+	dma_fence_init(&fence->base_fence, &hl_fence_ops, &fence->lock,
+			ctx->asid, ctx->cs_sequence);
+
+	cs->sequence = fence->cs_seq;
+
+	ctx->cs_pending[fence->cs_seq & (HL_MAX_PENDING_CS - 1)] =
+							&fence->base_fence;
+	ctx->cs_sequence++;
+
+	dma_fence_get(&fence->base_fence);
+
+	dma_fence_put(other);
+
+	spin_unlock(&ctx->cs_lock);
+
+	*cs_new = cs;
+
+	return 0;
+
+free_fence:
+	kfree(fence);
+free_cs:
+	kfree(cs);
+	return rc;
+}
+
+static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs)
+{
+	struct hl_cs_job *job, *tmp;
+
+	list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
+		free_job(hdev, job);
+}
+
+void hl_cs_rollback_all(struct hl_device *hdev)
+{
+
struct hl_cs *cs, *tmp; + + /* flush all completions */ + flush_workqueue(hdev->cq_wq); + + /* Make sure we don't have leftovers in the H/W queues mirror list */ + list_for_each_entry_safe(cs, tmp, &hdev->hw_queues_mirror_list, + mirror_node) { + cs_get(cs); + cs->aborted = true; + dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n", + cs->ctx->asid, cs->sequence); + cs_rollback(hdev, cs); + cs_put(cs); + } +} + +static void job_wq_completion(struct work_struct *work) +{ + struct hl_cs_job *job = container_of(work, struct hl_cs_job, + finish_work); + struct hl_cs *cs = job->cs; + struct hl_device *hdev = cs->ctx->hdev; + + /* job is no longer needed */ + free_job(hdev, job); +} + +static struct hl_cb *validate_queue_index(struct hl_device *hdev, + struct hl_cb_mgr *cb_mgr, + struct hl_cs_chunk *chunk, + bool *ext_queue) +{ + struct asic_fixed_properties *asic = &hdev->asic_prop; + struct hw_queue_properties *hw_queue_prop; + u32 cb_handle; + struct hl_cb *cb; + + /* Assume external queue */ + *ext_queue = true; + + hw_queue_prop = &asic->hw_queues_props[chunk->queue_index]; + + if ((chunk->queue_index >= HL_MAX_QUEUES) || + (hw_queue_prop->type == QUEUE_TYPE_NA)) { + dev_err(hdev->dev, "Queue index %d is invalid\n", + chunk->queue_index); + return NULL; + } + + if (hw_queue_prop->kmd_only) { + dev_err(hdev->dev, "Queue index %d is restricted for KMD\n", + chunk->queue_index); + return NULL; + } else if (hw_queue_prop->type == QUEUE_TYPE_INT) { + *ext_queue = false; + return (struct hl_cb *) (uintptr_t) chunk->cb_handle; + } + + /* Retrieve CB object */ + cb_handle = (u32) (chunk->cb_handle >> PAGE_SHIFT); + + cb = hl_cb_get(hdev, cb_mgr, cb_handle); + if (!cb) { + dev_err(hdev->dev, "CB handle 0x%x invalid\n", cb_handle); + return NULL; + } + + if ((chunk->cb_size < 8) || (chunk->cb_size > cb->size)) { + dev_err(hdev->dev, "CB size %u invalid\n", chunk->cb_size); + goto release_cb; + } + + spin_lock(&cb->lock); + cb->cs_cnt++; + spin_unlock(&cb->lock); + + return cb; + +release_cb: + hl_cb_put(cb); + return NULL; +} + +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue) +{ + struct hl_cs_job *job; + + job = kzalloc(sizeof(*job), GFP_ATOMIC); + if (!job) + return NULL; + + job->ext_queue = ext_queue; + + if (job->ext_queue) { + INIT_LIST_HEAD(&job->userptr_list); + INIT_WORK(&job->finish_work, job_wq_completion); + } + + return job; +} + +static int _hl_cs_ioctl(struct hl_fpriv *hpriv, void __user *chunks, + u32 num_chunks, u64 *cs_seq) +{ + struct hl_device *hdev = hpriv->hdev; + struct hl_cs_chunk *cs_chunk_array; + struct hl_cs_job *job; + struct hl_cs *cs; + struct hl_cb *cb; + bool ext_queue_present = false; + u32 size_to_copy; + int rc, i, parse_cnt; + + *cs_seq = ULLONG_MAX; + + if (num_chunks > HL_MAX_JOBS_PER_CS) { + dev_err(hdev->dev, + "Number of chunks can NOT be larger than %d\n", + HL_MAX_JOBS_PER_CS); + rc = -EINVAL; + goto out; + } + + cs_chunk_array = kmalloc_array(num_chunks, sizeof(*cs_chunk_array), + GFP_ATOMIC); + if (!cs_chunk_array) { + rc = -ENOMEM; + goto out; + } + + size_to_copy = num_chunks * sizeof(struct hl_cs_chunk); + if (copy_from_user(cs_chunk_array, chunks, size_to_copy)) { + dev_err(hdev->dev, "Failed to copy cs chunk array from user\n"); + rc = -EFAULT; + goto free_cs_chunk_array; + } + + /* increment refcnt for context */ + hl_ctx_get(hdev, hpriv->ctx); + + rc = allocate_cs(hdev, hpriv->ctx, &cs); + if (rc) { + hl_ctx_put(hpriv->ctx); + goto free_cs_chunk_array; + } + + *cs_seq = cs->sequence; + + /* Validate ALL the CS 
chunks before submitting the CS */
+	for (i = 0, parse_cnt = 0 ; i < num_chunks ; i++, parse_cnt++) {
+		struct hl_cs_chunk *chunk = &cs_chunk_array[i];
+		bool ext_queue;
+
+		cb = validate_queue_index(hdev, &hpriv->cb_mgr, chunk,
+					&ext_queue);
+		if (ext_queue) {
+			ext_queue_present = true;
+			if (!cb) {
+				rc = -EINVAL;
+				goto free_cs_object;
+			}
+		}
+
+		job = hl_cs_allocate_job(hdev, ext_queue);
+		if (!job) {
+			dev_err(hdev->dev, "Failed to allocate a new job\n");
+			rc = -ENOMEM;
+			if (ext_queue)
+				goto release_cb;
+			else
+				goto free_cs_object;
+		}
+
+		job->id = i + 1;
+		job->cs = cs;
+		job->user_cb = cb;
+		job->user_cb_size = chunk->cb_size;
+		if (job->ext_queue)
+			job->job_cb_size = cb->size;
+		else
+			job->job_cb_size = chunk->cb_size;
+		job->hw_queue_id = chunk->queue_index;
+
+		cs->jobs_in_queue_cnt[job->hw_queue_id]++;
+
+		list_add_tail(&job->cs_node, &cs->job_list);
+
+		/*
+		 * Increment CS reference. When CS reference is 0, CS is
+		 * done and can be signaled to user and free all its resources
+		 * Only increment for JOB on external queues, because only
+		 * for those JOBs we get completion
+		 */
+		if (job->ext_queue)
+			cs_get(cs);
+
+		rc = cs_parser(hpriv, job);
+		if (rc) {
+			dev_err(hdev->dev,
+				"Failed to parse JOB %d.%llu.%d, err %d, rejecting the CS\n",
+				cs->ctx->asid, cs->sequence, job->id, rc);
+			goto free_cs_object;
+		}
+	}
+
+	if (!ext_queue_present) {
+		dev_err(hdev->dev,
+			"Reject CS %d.%llu because it has no external queue jobs\n",
+			cs->ctx->asid, cs->sequence);
+		rc = -EINVAL;
+		goto free_cs_object;
+	}
+
+	rc = hl_hw_queue_schedule_cs(cs);
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to submit CS %d.%llu to H/W queues, error %d\n",
+			cs->ctx->asid, cs->sequence, rc);
+		goto free_cs_object;
+	}
+
+	rc = HL_CS_STATUS_SUCCESS;
+	goto put_cs;
+
+release_cb:
+	spin_lock(&cb->lock);
+	cb->cs_cnt--;
+	spin_unlock(&cb->lock);
+	hl_cb_put(cb);
+free_cs_object:
+	cs_rollback(hdev, cs);
+	*cs_seq = ULLONG_MAX;
+	/* The path below is both for good and erroneous exits */
+put_cs:
+	/* We finished with the CS in this function, so put the ref */
+	cs_put(cs);
+free_cs_chunk_array:
+	kfree(cs_chunk_array);
+out:
+	return rc;
+}
+
+int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
+{
+	struct hl_device *hdev = hpriv->hdev;
+	union hl_cs_args *args = data;
+	struct hl_ctx *ctx = hpriv->ctx;
+	void __user *chunks;
+	u32 num_chunks;
+	u64 cs_seq = ULLONG_MAX;
+	int rc, do_restore;
+	bool need_soft_reset = false;
+
+	if (hl_device_disabled_or_in_reset(hdev)) {
+		dev_warn(hdev->dev,
+			"Device is %s. Can't submit new CS\n",
+			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
+		rc = -EBUSY;
+		goto out;
+	}
+
+	do_restore = atomic_cmpxchg(&ctx->thread_restore_token, 1, 0);
+
+	if (do_restore || (args->in.cs_flags & HL_CS_FLAGS_FORCE_RESTORE)) {
+		long ret;
+
+		chunks = (void __user *)(uintptr_t)args->in.chunks_restore;
+		num_chunks = args->in.num_chunks_restore;
+
+		mutex_lock(&hpriv->restore_phase_mutex);
+
+		if (do_restore) {
+			rc = hdev->asic_funcs->context_switch(hdev, ctx->asid);
+			if (rc) {
+				dev_err_ratelimited(hdev->dev,
+					"Failed to switch to context %d, rejecting CS! %d\n",
+					ctx->asid, rc);
+				/*
+				 * If we timed out, we need to soft-reset because
+				 * QMAN is probably stuck.
However, we can't + * call to reset here directly because of + * deadlock, so need to do it at the very end + * of this function + */ + if (rc == -ETIMEDOUT) + need_soft_reset = true; + mutex_unlock(&hpriv->restore_phase_mutex); + goto out; + } + } + + hdev->asic_funcs->restore_phase_topology(hdev); + + if (num_chunks == 0) { + dev_dbg(hdev->dev, + "Need to run restore phase but restore CS is empty\n"); + rc = 0; + } else { + rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, + &cs_seq); + } + + mutex_unlock(&hpriv->restore_phase_mutex); + + if (rc) { + dev_err(hdev->dev, + "Failed to submit restore CS for context %d (%d)\n", + ctx->asid, rc); + goto out; + } + + /* Need to wait for restore completion before execution phase */ + if (num_chunks > 0) { + ret = _hl_cs_wait_ioctl(hdev, ctx, + jiffies_to_usecs(hdev->timeout_jiffies), + cs_seq); + if (ret <= 0) { + dev_err(hdev->dev, + "Restore CS for context %d failed to complete %ld\n", + ctx->asid, ret); + rc = -ENOEXEC; + goto out; + } + } + + ctx->thread_restore_wait_token = 1; + } else if (!ctx->thread_restore_wait_token) { + u32 tmp; + + rc = hl_poll_timeout_memory(hdev, + (u64) (uintptr_t) &ctx->thread_restore_wait_token, + jiffies_to_usecs(hdev->timeout_jiffies), + &tmp); + + if (rc || !tmp) { + dev_err(hdev->dev, + "restore phase hasn't finished in time\n"); + rc = -ETIMEDOUT; + goto out; + } + } + + chunks = (void __user *)(uintptr_t)args->in.chunks_execute; + num_chunks = args->in.num_chunks_execute; + + if (num_chunks == 0) { + dev_err(hdev->dev, + "Got execute CS with 0 chunks, context %d\n", + ctx->asid); + rc = -EINVAL; + goto out; + } + + rc = _hl_cs_ioctl(hpriv, chunks, num_chunks, &cs_seq); + +out: + if (rc != -EAGAIN) { + memset(args, 0, sizeof(*args)); + args->out.status = rc; + args->out.seq = cs_seq; + } + + if ((rc == -ETIMEDOUT) && (need_soft_reset)) + hl_device_reset(hdev, false, false); + + return rc; +} + +static long _hl_cs_wait_ioctl(struct hl_device *hdev, + struct hl_ctx *ctx, u64 timeout_us, u64 seq) +{ + struct dma_fence *fence; + unsigned long timeout; + long rc; + + if (timeout_us == MAX_SCHEDULE_TIMEOUT) + timeout = timeout_us; + else + timeout = usecs_to_jiffies(timeout_us); + + hl_ctx_get(hdev, ctx); + + fence = hl_ctx_get_fence(ctx, seq); + if (IS_ERR(fence)) { + rc = PTR_ERR(fence); + } else if (fence) { + rc = dma_fence_wait_timeout(fence, true, timeout); + if (fence->error == -ETIMEDOUT) + rc = -ETIMEDOUT; + else if (fence->error == -EIO) + rc = -EIO; + dma_fence_put(fence); + } else + rc = 1; + + hl_ctx_put(ctx); + + return rc; +} + +int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_device *hdev = hpriv->hdev; + union hl_wait_cs_args *args = data; + u64 seq = args->in.seq; + long rc; + + rc = _hl_cs_wait_ioctl(hdev, hpriv->ctx, args->in.timeout_us, seq); + + memset(args, 0, sizeof(*args)); + + if (rc < 0) { + dev_err(hdev->dev, "Error %ld on waiting for CS handle %llu\n", + rc, seq); + if (rc == -ERESTARTSYS) { + args->out.status = HL_WAIT_CS_STATUS_INTERRUPTED; + rc = -EINTR; + } else if (rc == -ETIMEDOUT) { + args->out.status = HL_WAIT_CS_STATUS_TIMEDOUT; + } else if (rc == -EIO) { + args->out.status = HL_WAIT_CS_STATUS_ABORTED; + } + return rc; + } + + if (rc == 0) + args->out.status = HL_WAIT_CS_STATUS_BUSY; + else + args->out.status = HL_WAIT_CS_STATUS_COMPLETED; + + return 0; +} diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index de1258e7a6e6..c3854714b46c 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ 
-12,6 +12,18 @@ static void hl_ctx_fini(struct hl_ctx *ctx) { struct hl_device *hdev = ctx->hdev; + int i; + + /* + * If we arrived here, there are no jobs waiting for this context + * on its queues so we can safely remove it. + * This is because for each CS, we increment the ref count and for + * every CS that was finished we decrement it and we won't arrive + * to this function unless the ref count is 0 + */ + + for (i = 0 ; i < HL_MAX_PENDING_CS ; i++) + dma_fence_put(ctx->cs_pending[i]); if (ctx->asid != HL_KERNEL_ASID_ID) hl_asid_free(hdev, ctx->asid); @@ -23,8 +35,6 @@ void hl_ctx_do_release(struct kref *ref) ctx = container_of(ref, struct hl_ctx, refcount); - dev_dbg(ctx->hdev->dev, "Now really releasing context %d\n", ctx->asid); - hl_ctx_fini(ctx); if (ctx->hpriv) @@ -90,6 +100,11 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) kref_init(&ctx->refcount); + ctx->cs_sequence = 1; + spin_lock_init(&ctx->cs_lock); + atomic_set(&ctx->thread_restore_token, 1); + ctx->thread_restore_wait_token = 0; + if (is_kernel_ctx) { ctx->asid = HL_KERNEL_ASID_ID; /* KMD gets ASID 0 */ } else { @@ -100,8 +115,6 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) } } - dev_dbg(hdev->dev, "Created context with ASID %u\n", ctx->asid); - return 0; } @@ -115,6 +128,37 @@ int hl_ctx_put(struct hl_ctx *ctx) return kref_put(&ctx->refcount, hl_ctx_do_release); } +struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq) +{ + struct hl_device *hdev = ctx->hdev; + struct dma_fence *fence; + + spin_lock(&ctx->cs_lock); + + if (seq >= ctx->cs_sequence) { + dev_notice(hdev->dev, + "Can't wait on seq %llu because current CS is at seq %llu\n", + seq, ctx->cs_sequence); + spin_unlock(&ctx->cs_lock); + return ERR_PTR(-EINVAL); + } + + + if (seq + HL_MAX_PENDING_CS < ctx->cs_sequence) { + dev_dbg(hdev->dev, + "Can't wait on seq %llu because current CS is at seq %llu (Fence is gone)\n", + seq, ctx->cs_sequence); + spin_unlock(&ctx->cs_lock); + return NULL; + } + + fence = dma_fence_get( + ctx->cs_pending[seq & (HL_MAX_PENDING_CS - 1)]); + spin_unlock(&ctx->cs_lock); + + return fence; +} + /* * hl_ctx_mgr_init - initialize the context manager * diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index 2aa8a68cdf76..cc5f068df597 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -30,6 +30,8 @@ static void hpriv_release(struct kref *ref) put_pid(hpriv->taskpid); + mutex_destroy(&hpriv->restore_phase_mutex); + kfree(hpriv); /* Now the FD is really closed */ @@ -208,6 +210,8 @@ static int device_early_init(struct hl_device *hdev) mutex_init(&hdev->fd_open_cnt_lock); mutex_init(&hdev->send_cpu_message_lock); + INIT_LIST_HEAD(&hdev->hw_queues_mirror_list); + spin_lock_init(&hdev->hw_queues_mirror_lock); atomic_set(&hdev->in_reset, 0); atomic_set(&hdev->fd_open_cnt, 0); @@ -593,6 +597,9 @@ again: */ hdev->asic_funcs->halt_engines(hdev, hard_reset); + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + if (hard_reset) { /* Release kernel context */ if (hl_ctx_put(hdev->kernel_ctx) != 1) { @@ -616,6 +623,12 @@ again: for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) hl_cq_reset(hdev, &hdev->completion_queue[i]); + /* Make sure the setup phase for the user context will run again */ + if (hdev->user_ctx) { + atomic_set(&hdev->user_ctx->thread_restore_token, 1); + hdev->user_ctx->thread_restore_wait_token = 0; + } + /* Finished tear-down, 
starting to re-initialize */ if (hard_reset) { @@ -952,6 +965,9 @@ void hl_device_fini(struct hl_device *hdev) */ hdev->asic_funcs->halt_engines(hdev, true); + /* Go over all the queues, release all CS and their jobs */ + hl_cs_rollback_all(hdev); + hl_cb_pool_fini(hdev); /* Release kernel context */ diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 1fe1d6a1ff9e..e3878fd7dc94 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -95,6 +95,19 @@ static const char goya_irq_name[GOYA_MSIX_ENTRIES][GOYA_MAX_STRING_LEN] = { "goya cq 4", "goya cpu eq" }; +static u16 goya_packet_sizes[MAX_PACKET_ID] = { + [PACKET_WREG_32] = sizeof(struct packet_wreg32), + [PACKET_WREG_BULK] = sizeof(struct packet_wreg_bulk), + [PACKET_MSG_LONG] = sizeof(struct packet_msg_long), + [PACKET_MSG_SHORT] = sizeof(struct packet_msg_short), + [PACKET_CP_DMA] = sizeof(struct packet_cp_dma), + [PACKET_MSG_PROT] = sizeof(struct packet_msg_prot), + [PACKET_FENCE] = sizeof(struct packet_fence), + [PACKET_LIN_DMA] = sizeof(struct packet_lin_dma), + [PACKET_NOP] = sizeof(struct packet_nop), + [PACKET_STOP] = sizeof(struct packet_stop) +}; + static const char *goya_axi_name[GOYA_MAX_INITIATORS] = { "MME0", "MME1", @@ -2978,6 +2991,84 @@ void *goya_get_int_queue_base(struct hl_device *hdev, u32 queue_id, return base; } +int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) +{ + struct goya_device *goya = hdev->asic_specific; + struct packet_msg_prot *fence_pkt; + u32 *fence_ptr; + dma_addr_t fence_dma_addr; + struct hl_cb *cb; + u32 tmp; + int rc; + + if (!hdev->asic_funcs->is_device_idle(hdev)) { + dev_err_ratelimited(hdev->dev, + "Can't send KMD job on QMAN0 if device is not idle\n"); + return -EFAULT; + } + + fence_ptr = hdev->asic_funcs->dma_pool_zalloc(hdev, 4, GFP_KERNEL, + &fence_dma_addr); + if (!fence_ptr) { + dev_err(hdev->dev, + "Failed to allocate fence memory for QMAN0\n"); + return -ENOMEM; + } + + *fence_ptr = 0; + + if (goya->hw_cap_initialized & HW_CAP_MMU) { + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_FULLY_TRUSTED); + RREG32(mmDMA_QM_0_GLBL_PROT); + } + + /* + * goya cs parser saves space for 2xpacket_msg_prot at end of CB. 
For + * synchronized kernel jobs we only need space for 1 packet_msg_prot + */ + job->job_cb_size -= sizeof(struct packet_msg_prot); + + cb = job->patched_cb; + + fence_pkt = (struct packet_msg_prot *) (uintptr_t) (cb->kernel_address + + job->job_cb_size - sizeof(struct packet_msg_prot)); + + fence_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) | + (1 << GOYA_PKT_CTL_EB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT); + fence_pkt->value = GOYA_QMAN0_FENCE_VAL; + fence_pkt->addr = fence_dma_addr + + hdev->asic_prop.host_phys_base_address; + + rc = hl_hw_queue_send_cb_no_cmpl(hdev, GOYA_QUEUE_ID_DMA_0, + job->job_cb_size, cb->bus_address); + if (rc) { + dev_err(hdev->dev, "Failed to send CB on QMAN0, %d\n", rc); + goto free_fence_ptr; + } + + rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, + HL_DEVICE_TIMEOUT_USEC, &tmp); + + hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); + + if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) { + dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n"); + rc = -ETIMEDOUT; + } + +free_fence_ptr: + hdev->asic_funcs->dma_pool_free(hdev, (void *) fence_ptr, + fence_dma_addr); + + if (goya->hw_cap_initialized & HW_CAP_MMU) { + WREG32(mmDMA_QM_0_GLBL_PROT, QMAN_DMA_PARTLY_TRUSTED); + RREG32(mmDMA_QM_0_GLBL_PROT); + } + + return rc; +} + int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, u32 timeout, long *result) { @@ -3214,11 +3305,985 @@ void goya_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, size); } +int goya_dma_map_sg(struct hl_device *hdev, struct scatterlist *sg, int nents, + enum dma_data_direction dir) +{ + if (!dma_map_sg(&hdev->pdev->dev, sg, nents, dir)) + return -ENOMEM; + + return 0; +} + +void goya_dma_unmap_sg(struct hl_device *hdev, struct scatterlist *sg, + int nents, enum dma_data_direction dir) +{ + dma_unmap_sg(&hdev->pdev->dev, sg, nents, dir); +} + +u32 goya_get_dma_desc_list_size(struct hl_device *hdev, + struct sg_table *sgt) +{ + struct scatterlist *sg, *sg_next_iter; + u32 count, len, dma_desc_cnt, len_next; + dma_addr_t addr, addr_next; + + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + + len = sg_dma_len(sg); + addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((addr + len == addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + dma_desc_cnt++; + } + + return dma_desc_cnt * sizeof(struct packet_lin_dma); +} + +static int goya_pin_memory_before_cs(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + u64 addr, enum dma_data_direction dir) +{ + struct hl_userptr *userptr; + int rc; + + if (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize, + parser->job_userptr_list, &userptr)) + goto already_pinned; + + userptr = kzalloc(sizeof(*userptr), GFP_ATOMIC); + if (!userptr) + return -ENOMEM; + + rc = hl_pin_host_memory(hdev, addr, user_dma_pkt->tsize, userptr); + if (rc) + goto free_userptr; + + list_add_tail(&userptr->job_node, parser->job_userptr_list); + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, dir); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto unpin_memory; + } + + userptr->dma_mapped = true; + userptr->dir = dir; + +already_pinned: + 
parser->patched_cb_size +=
+			goya_get_dma_desc_list_size(hdev, userptr->sgt);
+
+	return 0;
+
+unpin_memory:
+	hl_unpin_host_memory(hdev, userptr);
+free_userptr:
+	kfree(userptr);
+	return rc;
+}
+
+static int goya_validate_dma_pkt_host(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_lin_dma *user_dma_pkt)
+{
+	u64 device_memory_addr, addr;
+	enum dma_data_direction dir;
+	enum goya_dma_direction user_dir;
+	bool sram_addr = true;
+	bool skip_host_mem_pin = false;
+	bool user_memset;
+	int rc = 0;
+
+	user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+	user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT;
+
+	switch (user_dir) {
+	case DMA_HOST_TO_DRAM:
+		dev_dbg(hdev->dev, "DMA direction is HOST --> DRAM\n");
+		dir = DMA_TO_DEVICE;
+		sram_addr = false;
+		addr = user_dma_pkt->src_addr;
+		device_memory_addr = user_dma_pkt->dst_addr;
+		if (user_memset)
+			skip_host_mem_pin = true;
+		break;
+
+	case DMA_DRAM_TO_HOST:
+		dev_dbg(hdev->dev, "DMA direction is DRAM --> HOST\n");
+		dir = DMA_FROM_DEVICE;
+		sram_addr = false;
+		addr = user_dma_pkt->dst_addr;
+		device_memory_addr = user_dma_pkt->src_addr;
+		break;
+
+	case DMA_HOST_TO_SRAM:
+		dev_dbg(hdev->dev, "DMA direction is HOST --> SRAM\n");
+		dir = DMA_TO_DEVICE;
+		addr = user_dma_pkt->src_addr;
+		device_memory_addr = user_dma_pkt->dst_addr;
+		if (user_memset)
+			skip_host_mem_pin = true;
+		break;
+
+	case DMA_SRAM_TO_HOST:
+		dev_dbg(hdev->dev, "DMA direction is SRAM --> HOST\n");
+		dir = DMA_FROM_DEVICE;
+		addr = user_dma_pkt->dst_addr;
+		device_memory_addr = user_dma_pkt->src_addr;
+		break;
+	default:
+		dev_err(hdev->dev, "DMA direction is undefined\n");
+		return -EFAULT;
+	}
+
+	if (parser->ctx_id != HL_KERNEL_ASID_ID) {
+		if (sram_addr) {
+			if (!hl_mem_area_inside_range(device_memory_addr,
+					user_dma_pkt->tsize,
+					hdev->asic_prop.sram_user_base_address,
+					hdev->asic_prop.sram_end_address)) {
+
+				dev_err(hdev->dev,
+					"SRAM address 0x%llx + 0x%x is invalid\n",
+					device_memory_addr,
+					user_dma_pkt->tsize);
+				return -EFAULT;
+			}
+		} else {
+			if (!hl_mem_area_inside_range(device_memory_addr,
+					user_dma_pkt->tsize,
+					hdev->asic_prop.dram_user_base_address,
+					hdev->asic_prop.dram_end_address)) {
+
+				dev_err(hdev->dev,
+					"DRAM address 0x%llx + 0x%x is invalid\n",
+					device_memory_addr,
+					user_dma_pkt->tsize);
+				return -EFAULT;
+			}
+		}
+	}
+
+	if (skip_host_mem_pin)
+		parser->patched_cb_size += sizeof(*user_dma_pkt);
+	else {
+		if ((dir == DMA_TO_DEVICE) &&
+			(parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1)) {
+			dev_err(hdev->dev,
+				"Can't DMA from host on queue other than 1\n");
+			return -EFAULT;
+		}
+
+		rc = goya_pin_memory_before_cs(hdev, parser, user_dma_pkt,
+						addr, dir);
+	}
+
+	return rc;
+}
+
+static int goya_validate_dma_pkt_no_host(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_lin_dma *user_dma_pkt)
+{
+	u64 sram_memory_addr, dram_memory_addr;
+	enum goya_dma_direction user_dir;
+
+	user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >>
+			GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT;
+
+	if (user_dir == DMA_DRAM_TO_SRAM) {
+		dev_dbg(hdev->dev, "DMA direction is DRAM --> SRAM\n");
+		dram_memory_addr = user_dma_pkt->src_addr;
+		sram_memory_addr = user_dma_pkt->dst_addr;
+	} else {
+		dev_dbg(hdev->dev, "DMA direction is SRAM --> DRAM\n");
+		sram_memory_addr = user_dma_pkt->src_addr;
+		dram_memory_addr = user_dma_pkt->dst_addr;
+	}
+
+	if (!hl_mem_area_inside_range(sram_memory_addr,
user_dma_pkt->tsize, + hdev->asic_prop.sram_user_base_address, + hdev->asic_prop.sram_end_address)) { + dev_err(hdev->dev, "SRAM address 0x%llx + 0x%x is invalid\n", + sram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if (!hl_mem_area_inside_range(dram_memory_addr, user_dma_pkt->tsize, + hdev->asic_prop.dram_user_base_address, + hdev->asic_prop.dram_end_address)) { + dev_err(hdev->dev, "DRAM address 0x%llx + 0x%x is invalid\n", + dram_memory_addr, user_dma_pkt->tsize); + return -EFAULT; + } + + parser->patched_cb_size += sizeof(*user_dma_pkt); + + return 0; +} + +static int goya_validate_dma_pkt_no_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + enum goya_dma_direction user_dir; + int rc; + + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); + dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); + dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + + user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + /* + * Special handling for DMA with size 0. The H/W has a bug where + * this can cause the QMAN DMA to get stuck, so block it here. + */ + if (user_dma_pkt->tsize == 0) { + dev_err(hdev->dev, + "Got DMA with size 0, might reset the device\n"); + return -EINVAL; + } + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM)) + rc = goya_validate_dma_pkt_no_host(hdev, parser, user_dma_pkt); + else + rc = goya_validate_dma_pkt_host(hdev, parser, user_dma_pkt); + + return rc; +} + +static int goya_validate_dma_pkt_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt) +{ + dev_dbg(hdev->dev, "DMA packet details:\n"); + dev_dbg(hdev->dev, "source == 0x%llx\n", user_dma_pkt->src_addr); + dev_dbg(hdev->dev, "destination == 0x%llx\n", user_dma_pkt->dst_addr); + dev_dbg(hdev->dev, "size == %u\n", user_dma_pkt->tsize); + + /* + * WA for HW-23. + * We can't allow user to read from Host using QMANs other than 1. 
+	 */
+	if (parser->hw_queue_id > GOYA_QUEUE_ID_DMA_1 &&
+		hl_mem_area_inside_range(user_dma_pkt->src_addr,
+			user_dma_pkt->tsize,
+			hdev->asic_prop.va_space_host_start_address,
+			hdev->asic_prop.va_space_host_end_address)) {
+		dev_err(hdev->dev,
+			"Can't DMA from host on queue other than 1\n");
+		return -EFAULT;
+	}
+
+	if (user_dma_pkt->tsize == 0) {
+		dev_err(hdev->dev,
+			"Got DMA with size 0, might reset the device\n");
+		return -EINVAL;
+	}
+
+	parser->patched_cb_size += sizeof(*user_dma_pkt);
+
+	return 0;
+}
+
+static int goya_validate_wreg32(struct hl_device *hdev,
+				struct hl_cs_parser *parser,
+				struct packet_wreg32 *wreg_pkt)
+{
+	struct goya_device *goya = hdev->asic_specific;
+	u32 sob_start_addr, sob_end_addr;
+	u16 reg_offset;
+
+	reg_offset = wreg_pkt->ctl & GOYA_PKT_WREG32_CTL_REG_OFFSET_MASK;
+
+	dev_dbg(hdev->dev, "WREG32 packet details:\n");
+	dev_dbg(hdev->dev, "reg_offset == 0x%x\n", reg_offset);
+	dev_dbg(hdev->dev, "value == 0x%x\n", wreg_pkt->value);
+
+	if (reg_offset != (mmDMA_CH_1_WR_COMP_ADDR_LO & 0xFFFF)) {
+		dev_err(hdev->dev, "WREG32 packet with illegal address 0x%x\n",
+			reg_offset);
+		return -EPERM;
+	}
+
+	/*
+	 * With MMU, DMA channels are not secured, so it doesn't matter where
+	 * the WR COMP will be written to because it will go out with
+	 * non-secured property
+	 */
+	if (goya->hw_cap_initialized & HW_CAP_MMU)
+		return 0;
+
+	sob_start_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_0);
+	sob_end_addr = lower_32_bits(CFG_BASE + mmSYNC_MNGR_SOB_OBJ_1023);
+
+	if ((wreg_pkt->value < sob_start_addr) ||
+			(wreg_pkt->value > sob_end_addr)) {
+
+		dev_err(hdev->dev, "WREG32 packet with illegal value 0x%x\n",
+			wreg_pkt->value);
+		return -EPERM;
+	}
+
+	return 0;
+}
+
+static int goya_validate_cb(struct hl_device *hdev,
+			struct hl_cs_parser *parser, bool is_mmu)
+{
+	u32 cb_parsed_length = 0;
+	int rc = 0;
+
+	parser->patched_cb_size = 0;
+
+	/* cb_user_size is more than 0 so loop will always be executed */
+	while (cb_parsed_length < parser->user_cb_size) {
+		enum packet_id pkt_id;
+		u16 pkt_size;
+		void *user_pkt;
+
+		user_pkt = (void *) (uintptr_t)
+			(parser->user_cb->kernel_address + cb_parsed_length);
+
+		pkt_id = (enum packet_id) (((*(u64 *) user_pkt) &
+				PACKET_HEADER_PACKET_ID_MASK) >>
+					PACKET_HEADER_PACKET_ID_SHIFT);
+
+		pkt_size = goya_packet_sizes[pkt_id];
+		cb_parsed_length += pkt_size;
+		if (cb_parsed_length > parser->user_cb_size) {
+			dev_err(hdev->dev,
+				"packet 0x%x is out of CB boundary\n", pkt_id);
+			rc = -EINVAL;
+			break;
+		}
+
+		switch (pkt_id) {
+		case PACKET_WREG_32:
+			/*
+			 * Although it is validated after copy in patch_cb(),
+			 * need to validate here as well because patch_cb() is
+			 * not called in MMU path while this function is called
+			 */
+			rc = goya_validate_wreg32(hdev, parser, user_pkt);
+			break;
+
+		case PACKET_WREG_BULK:
+			dev_err(hdev->dev,
+				"User not allowed to use WREG_BULK\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_MSG_PROT:
+			dev_err(hdev->dev,
+				"User not allowed to use MSG_PROT\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_CP_DMA:
+			dev_err(hdev->dev, "User not allowed to use CP_DMA\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_STOP:
+			dev_err(hdev->dev, "User not allowed to use STOP\n");
+			rc = -EPERM;
+			break;
+
+		case PACKET_LIN_DMA:
+			if (is_mmu)
+				rc = goya_validate_dma_pkt_mmu(hdev, parser,
+							user_pkt);
+			else
+				rc = goya_validate_dma_pkt_no_mmu(hdev, parser,
+							user_pkt);
+			break;
+
+		case PACKET_MSG_LONG:
+		case PACKET_MSG_SHORT:
+		case PACKET_FENCE:
+		case PACKET_NOP:
+			parser->patched_cb_size += pkt_size;
+
break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + /* + * The new CB should have space at the end for two MSG_PROT packets: + * 1. A packet that will act as a completion packet + * 2. A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size += sizeof(struct packet_msg_prot) * 2; + + return rc; +} + +static int goya_patch_dma_packet(struct hl_device *hdev, + struct hl_cs_parser *parser, + struct packet_lin_dma *user_dma_pkt, + struct packet_lin_dma *new_dma_pkt, + u32 *new_dma_pkt_size) +{ + struct hl_userptr *userptr; + struct scatterlist *sg, *sg_next_iter; + u32 count, len, dma_desc_cnt, len_next; + dma_addr_t dma_addr, dma_addr_next; + enum goya_dma_direction user_dir; + u64 device_memory_addr, addr; + enum dma_data_direction dir; + struct sg_table *sgt; + bool skip_host_mem_pin = false; + bool user_memset; + u32 user_rdcomp_mask, user_wrcomp_mask; + + user_dir = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_DMA_DIR_MASK) >> + GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT; + + user_memset = (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_MEMSET_MASK) >> + GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT; + + if ((user_dir == DMA_DRAM_TO_SRAM) || (user_dir == DMA_SRAM_TO_DRAM) || + (user_dma_pkt->tsize == 0)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*new_dma_pkt)); + *new_dma_pkt_size = sizeof(*new_dma_pkt); + return 0; + } + + if ((user_dir == DMA_HOST_TO_DRAM) || (user_dir == DMA_HOST_TO_SRAM)) { + addr = user_dma_pkt->src_addr; + device_memory_addr = user_dma_pkt->dst_addr; + dir = DMA_TO_DEVICE; + if (user_memset) + skip_host_mem_pin = true; + } else { + addr = user_dma_pkt->dst_addr; + device_memory_addr = user_dma_pkt->src_addr; + dir = DMA_FROM_DEVICE; + } + + if ((!skip_host_mem_pin) && + (hl_userptr_is_pinned(hdev, addr, user_dma_pkt->tsize, + parser->job_userptr_list, &userptr) == false)) { + dev_err(hdev->dev, "Userptr 0x%llx + 0x%x NOT mapped\n", + addr, user_dma_pkt->tsize); + return -EFAULT; + } + + if ((user_memset) && (dir == DMA_TO_DEVICE)) { + memcpy(new_dma_pkt, user_dma_pkt, sizeof(*user_dma_pkt)); + *new_dma_pkt_size = sizeof(*user_dma_pkt); + return 0; + } + + user_rdcomp_mask = + (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK); + + user_wrcomp_mask = + (user_dma_pkt->ctl & GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK); + + sgt = userptr->sgt; + dma_desc_cnt = 0; + + for_each_sg(sgt->sgl, sg, sgt->nents, count) { + len = sg_dma_len(sg); + dma_addr = sg_dma_address(sg); + + if (len == 0) + break; + + while ((count + 1) < sgt->nents) { + sg_next_iter = sg_next(sg); + len_next = sg_dma_len(sg_next_iter); + dma_addr_next = sg_dma_address(sg_next_iter); + + if (len_next == 0) + break; + + if ((dma_addr + len == dma_addr_next) && + (len + len_next <= DMA_MAX_TRANSFER_SIZE)) { + len += len_next; + count++; + sg = sg_next_iter; + } else { + break; + } + } + + new_dma_pkt->ctl = user_dma_pkt->ctl; + if (likely(dma_desc_cnt)) + new_dma_pkt->ctl &= ~GOYA_PKT_CTL_EB_MASK; + new_dma_pkt->ctl &= ~(GOYA_PKT_LIN_DMA_CTL_RDCOMP_MASK | + GOYA_PKT_LIN_DMA_CTL_WRCOMP_MASK); + new_dma_pkt->tsize = len; + + dma_addr += hdev->asic_prop.host_phys_base_address; + + if (dir == DMA_TO_DEVICE) { + new_dma_pkt->src_addr = dma_addr; + new_dma_pkt->dst_addr = device_memory_addr; + } else { + new_dma_pkt->src_addr = device_memory_addr; + new_dma_pkt->dst_addr = dma_addr; + } + + if (!user_memset) + device_memory_addr += len; + dma_desc_cnt++; + new_dma_pkt++; + } + + if (!dma_desc_cnt) { + dev_err(hdev->dev, + "Error of 0 SG 
entries when patching DMA packet\n"); + return -EFAULT; + } + + /* Fix the last dma packet - rdcomp/wrcomp must be as user set them */ + new_dma_pkt--; + new_dma_pkt->ctl |= (user_rdcomp_mask | user_wrcomp_mask); + + *new_dma_pkt_size = dma_desc_cnt * sizeof(struct packet_lin_dma); + + return 0; +} + +static int goya_patch_cb(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u32 cb_parsed_length = 0; + u32 cb_patched_cur_length = 0; + int rc = 0; + + /* cb_user_size is more than 0 so loop will always be executed */ + while (cb_parsed_length < parser->user_cb_size) { + enum packet_id pkt_id; + u16 pkt_size; + u32 new_pkt_size = 0; + void *user_pkt, *kernel_pkt; + + user_pkt = (void *) (uintptr_t) + (parser->user_cb->kernel_address + cb_parsed_length); + kernel_pkt = (void *) (uintptr_t) + (parser->patched_cb->kernel_address + + cb_patched_cur_length); + + pkt_id = (enum packet_id) (((*(u64 *) user_pkt) & + PACKET_HEADER_PACKET_ID_MASK) >> + PACKET_HEADER_PACKET_ID_SHIFT); + + pkt_size = goya_packet_sizes[pkt_id]; + cb_parsed_length += pkt_size; + if (cb_parsed_length > parser->user_cb_size) { + dev_err(hdev->dev, + "packet 0x%x is out of CB boundary\n", pkt_id); + rc = -EINVAL; + break; + } + + switch (pkt_id) { + case PACKET_LIN_DMA: + rc = goya_patch_dma_packet(hdev, parser, user_pkt, + kernel_pkt, &new_pkt_size); + cb_patched_cur_length += new_pkt_size; + break; + + case PACKET_WREG_32: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + rc = goya_validate_wreg32(hdev, parser, kernel_pkt); + break; + + case PACKET_WREG_BULK: + dev_err(hdev->dev, + "User not allowed to use WREG_BULK\n"); + rc = -EPERM; + break; + + case PACKET_MSG_PROT: + dev_err(hdev->dev, + "User not allowed to use MSG_PROT\n"); + rc = -EPERM; + break; + + case PACKET_CP_DMA: + dev_err(hdev->dev, "User not allowed to use CP_DMA\n"); + rc = -EPERM; + break; + + case PACKET_STOP: + dev_err(hdev->dev, "User not allowed to use STOP\n"); + rc = -EPERM; + break; + + case PACKET_MSG_LONG: + case PACKET_MSG_SHORT: + case PACKET_FENCE: + case PACKET_NOP: + memcpy(kernel_pkt, user_pkt, pkt_size); + cb_patched_cur_length += pkt_size; + break; + + default: + dev_err(hdev->dev, "Invalid packet header 0x%x\n", + pkt_id); + rc = -EINVAL; + break; + } + + if (rc) + break; + } + + return rc; +} + +static int goya_parse_cb_mmu(struct hl_device *hdev, + struct hl_cs_parser *parser) +{ + u64 patched_cb_handle; + u32 patched_cb_size; + struct hl_cb *user_cb; + int rc; + + /* + * The new CB should have space at the end for two MSG_PROT pkt: + * 1. A packet that will act as a completion packet + * 2. A packet that will generate MSI-X interrupt + */ + parser->patched_cb_size = parser->user_cb_size + + sizeof(struct packet_msg_prot) * 2; + + rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr, + parser->patched_cb_size, + &patched_cb_handle, HL_KERNEL_ASID_ID); + + if (rc) { + dev_err(hdev->dev, + "Failed to allocate patched CB for DMA CS %d\n", + rc); + return rc; + } + + patched_cb_handle >>= PAGE_SHIFT; + parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr, + (u32) patched_cb_handle); + /* hl_cb_get should never fail here so use kernel WARN */ + WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n", + (u32) patched_cb_handle); + if (!parser->patched_cb) { + rc = -EFAULT; + goto out; + } + + /* + * The check that parser->user_cb_size <= parser->user_cb->size was done + * in validate_queue_index(). 
+	 */
+	memcpy((void *) (uintptr_t) parser->patched_cb->kernel_address,
+		(void *) (uintptr_t) parser->user_cb->kernel_address,
+		parser->user_cb_size);
+
+	patched_cb_size = parser->patched_cb_size;
+
+	/* validate patched CB instead of user CB */
+	user_cb = parser->user_cb;
+	parser->user_cb = parser->patched_cb;
+	rc = goya_validate_cb(hdev, parser, true);
+	parser->user_cb = user_cb;
+
+	if (rc) {
+		hl_cb_put(parser->patched_cb);
+		goto out;
+	}
+
+	if (patched_cb_size != parser->patched_cb_size) {
+		dev_err(hdev->dev, "user CB size mismatch\n");
+		hl_cb_put(parser->patched_cb);
+		rc = -EINVAL;
+		goto out;
+	}
+
+out:
+	/*
+	 * Always call cb destroy here because we still have 1 reference
+	 * to it by calling cb_get earlier. After the job completes,
+	 * cb_put will release it, but here we want to remove it from the
+	 * idr
+	 */
+	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+			patched_cb_handle << PAGE_SHIFT);
+
+	return rc;
+}
+
+int goya_parse_cb_no_mmu(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+	u64 patched_cb_handle;
+	int rc;
+
+	rc = goya_validate_cb(hdev, parser, false);
+
+	if (rc)
+		goto free_userptr;
+
+	rc = hl_cb_create(hdev, &hdev->kernel_cb_mgr,
+			parser->patched_cb_size,
+			&patched_cb_handle, HL_KERNEL_ASID_ID);
+	if (rc) {
+		dev_err(hdev->dev,
+			"Failed to allocate patched CB for DMA CS %d\n", rc);
+		goto free_userptr;
+	}
+
+	patched_cb_handle >>= PAGE_SHIFT;
+	parser->patched_cb = hl_cb_get(hdev, &hdev->kernel_cb_mgr,
+				(u32) patched_cb_handle);
+	/* hl_cb_get should never fail here so use kernel WARN */
+	WARN(!parser->patched_cb, "DMA CB handle invalid 0x%x\n",
+			(u32) patched_cb_handle);
+	if (!parser->patched_cb) {
+		rc = -EFAULT;
+		goto out;
+	}
+
+	rc = goya_patch_cb(hdev, parser);
+
+	if (rc)
+		hl_cb_put(parser->patched_cb);
+
+out:
+	/*
+	 * Always call cb destroy here because we still have 1 reference
+	 * to it by calling cb_get earlier.
After the job completes,
+	 * cb_put will release it, but here we want to remove it from the
+	 * idr
+	 */
+	hl_cb_destroy(hdev, &hdev->kernel_cb_mgr,
+			patched_cb_handle << PAGE_SHIFT);
+
+free_userptr:
+	if (rc)
+		hl_userptr_delete_list(hdev, parser->job_userptr_list);
+	return rc;
+}
+
+int goya_parse_cb_no_ext_quque(struct hl_device *hdev,
+				struct hl_cs_parser *parser)
+{
+	struct asic_fixed_properties *asic_prop = &hdev->asic_prop;
+	struct goya_device *goya = hdev->asic_specific;
+
+	if (!(goya->hw_cap_initialized & HW_CAP_MMU)) {
+		/* For internal queue jobs, just check if cb address is valid */
+		if (hl_mem_area_inside_range(
+				(u64) (uintptr_t) parser->user_cb,
+				parser->user_cb_size,
+				asic_prop->sram_user_base_address,
+				asic_prop->sram_end_address))
+			return 0;
+
+		if (hl_mem_area_inside_range(
+				(u64) (uintptr_t) parser->user_cb,
+				parser->user_cb_size,
+				asic_prop->dram_user_base_address,
+				asic_prop->dram_end_address))
+			return 0;
+
+		dev_err(hdev->dev,
+			"Internal CB address 0x%llx + 0x%x is not in SRAM nor in DRAM\n",
+			(u64) (uintptr_t) parser->user_cb,
+			parser->user_cb_size);
+
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+int goya_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser)
+{
+	struct goya_device *goya = hdev->asic_specific;
+
+	if (!parser->ext_queue)
+		return goya_parse_cb_no_ext_quque(hdev, parser);
+
+	if ((goya->hw_cap_initialized & HW_CAP_MMU) && parser->use_virt_addr)
+		return goya_parse_cb_mmu(hdev, parser);
+	else
+		return goya_parse_cb_no_mmu(hdev, parser);
+}
+
+void goya_add_end_of_cb_packets(u64 kernel_address, u32 len, u64 cq_addr,
+				u32 cq_val, u32 msix_vec)
+{
+	struct packet_msg_prot *cq_pkt;
+
+	cq_pkt = (struct packet_msg_prot *) (uintptr_t)
+		(kernel_address + len - (sizeof(struct packet_msg_prot) * 2));
+
+	cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+			(1 << GOYA_PKT_CTL_EB_SHIFT) |
+			(1 << GOYA_PKT_CTL_MB_SHIFT);
+	cq_pkt->value = cq_val;
+	cq_pkt->addr = cq_addr;
+
+	cq_pkt++;
+
+	cq_pkt->ctl = (PACKET_MSG_PROT << GOYA_PKT_CTL_OPCODE_SHIFT) |
+			(1 << GOYA_PKT_CTL_MB_SHIFT);
+	cq_pkt->value = msix_vec & 0x7FF;
+	cq_pkt->addr = CFG_BASE + mmPCIE_DBI_MSIX_DOORBELL_OFF;
+}
+
 static void goya_update_eq_ci(struct hl_device *hdev, u32 val)
 {
 	WREG32(mmPSOC_GLOBAL_CONF_SCRATCHPAD_6, val);
 }
 
+int goya_context_switch(struct hl_device *hdev, u32 asid)
+{
+	struct asic_fixed_properties *prop = &hdev->asic_prop;
+	struct packet_lin_dma *clear_sram_pkt;
+	struct hl_cs_parser parser;
+	struct hl_cs_job *job;
+	u32 cb_size;
+	struct hl_cb *cb;
+	int rc;
+
+	cb = hl_cb_kernel_create(hdev, PAGE_SIZE);
+	if (!cb)
+		return -EFAULT;
+
+	clear_sram_pkt = (struct packet_lin_dma *)
+					(uintptr_t) cb->kernel_address;
+
+	memset(clear_sram_pkt, 0, sizeof(*clear_sram_pkt));
+	cb_size = sizeof(*clear_sram_pkt);
+
+	clear_sram_pkt->ctl = ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) |
+			(DMA_HOST_TO_SRAM << GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT) |
+			(1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) |
+			(1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) |
+			(1 << GOYA_PKT_CTL_RB_SHIFT) |
+			(1 << GOYA_PKT_CTL_MB_SHIFT));
+
+	clear_sram_pkt->src_addr = 0x7777777777777777ull;
+	clear_sram_pkt->dst_addr = prop->sram_base_address;
+	if (hdev->pldm)
+		clear_sram_pkt->tsize = 0x10000;
+	else
+		clear_sram_pkt->tsize = prop->sram_size;
+
+	job = hl_cs_allocate_job(hdev, true);
+	if (!job) {
+		dev_err(hdev->dev, "Failed to allocate a new job\n");
+		rc = -ENOMEM;
+		goto release_cb;
+	}
+
+	job->id = 0;
+	job->user_cb = cb;
+	job->user_cb->cs_cnt++;
+	job->user_cb_size =
cb_size; + job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; + + parser.ctx_id = HL_KERNEL_ASID_ID; + parser.cs_sequence = 0; + parser.job_id = job->id; + parser.hw_queue_id = job->hw_queue_id; + parser.job_userptr_list = &job->userptr_list; + parser.user_cb = job->user_cb; + parser.user_cb_size = job->user_cb_size; + parser.ext_queue = job->ext_queue; + parser.use_virt_addr = hdev->mmu_enable; + + rc = hdev->asic_funcs->cs_parser(hdev, &parser); + if (rc) { + dev_err(hdev->dev, + "Failed to parse kernel CB during context switch\n"); + goto free_job; + } + + job->patched_cb = parser.patched_cb; + job->job_cb_size = parser.patched_cb_size; + job->patched_cb->cs_cnt++; + + rc = goya_send_job_on_qman0(hdev, job); + + job->patched_cb->cs_cnt--; + hl_cb_put(job->patched_cb); + +free_job: + hl_userptr_delete_list(hdev, &job->userptr_list); + kfree(job); + cb->cs_cnt--; + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +void goya_restore_phase_topology(struct hl_device *hdev) +{ + int i, num_of_sob_in_longs, num_of_mon_in_longs; + + num_of_sob_in_longs = + ((mmSYNC_MNGR_SOB_OBJ_1023 - mmSYNC_MNGR_SOB_OBJ_0) + 4); + + num_of_mon_in_longs = + ((mmSYNC_MNGR_MON_STATUS_255 - mmSYNC_MNGR_MON_STATUS_0) + 4); + + for (i = 0 ; i < num_of_sob_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_SOB_OBJ_0 + i, 0); + + for (i = 0 ; i < num_of_mon_in_longs ; i += 4) + WREG32(mmSYNC_MNGR_MON_STATUS_0 + i, 0); + + /* Flush all WREG to prevent race */ + i = RREG32(mmSYNC_MNGR_SOB_OBJ_0); +} + static void goya_get_axi_name(struct hl_device *hdev, u32 agent_id, u16 event_type, char *axi_name, int len) { @@ -3608,6 +4673,59 @@ static void goya_disable_clock_gating(struct hl_device *hdev) } +static bool goya_is_device_idle(struct hl_device *hdev) +{ + u64 offset, dma_qm_reg, tpc_qm_reg, tpc_cmdq_reg, tpc_cfg_reg; + int i; + + offset = mmDMA_QM_1_GLBL_STS0 - mmDMA_QM_0_GLBL_STS0; + + for (i = 0 ; i < DMA_MAX_NUM ; i++) { + dma_qm_reg = mmDMA_QM_0_GLBL_STS0 + i * offset; + + if ((RREG32(dma_qm_reg) & DMA_QM_IDLE_MASK) != + DMA_QM_IDLE_MASK) + return false; + } + + offset = mmTPC1_QM_GLBL_STS0 - mmTPC0_QM_GLBL_STS0; + + for (i = 0 ; i < TPC_MAX_NUM ; i++) { + tpc_qm_reg = mmTPC0_QM_GLBL_STS0 + i * offset; + tpc_cmdq_reg = mmTPC0_CMDQ_GLBL_STS0 + i * offset; + tpc_cfg_reg = mmTPC0_CFG_STATUS + i * offset; + + if ((RREG32(tpc_qm_reg) & TPC_QM_IDLE_MASK) != + TPC_QM_IDLE_MASK) + return false; + + if ((RREG32(tpc_cmdq_reg) & TPC_CMDQ_IDLE_MASK) != + TPC_CMDQ_IDLE_MASK) + return false; + + if ((RREG32(tpc_cfg_reg) & TPC_CFG_IDLE_MASK) != + TPC_CFG_IDLE_MASK) + return false; + } + + if ((RREG32(mmMME_QM_GLBL_STS0) & MME_QM_IDLE_MASK) != + MME_QM_IDLE_MASK) + return false; + + if ((RREG32(mmMME_CMDQ_GLBL_STS0) & MME_CMDQ_IDLE_MASK) != + MME_CMDQ_IDLE_MASK) + return false; + + if ((RREG32(mmMME_ARCH_STATUS) & MME_ARCH_IDLE_MASK) != + MME_ARCH_IDLE_MASK) + return false; + + if (RREG32(mmMME_SHADOW_0_STATUS) & MME_SHADOW_IDLE_MASK) + return false; + + return true; +} + static void goya_hw_queues_lock(struct hl_device *hdev) { struct goya_device *goya = hdev->asic_specific; @@ -3700,7 +4818,14 @@ static const struct hl_asic_funcs goya_funcs = { .dma_pool_free = goya_dma_pool_free, .cpu_accessible_dma_pool_alloc = goya_cpu_accessible_dma_pool_alloc, .cpu_accessible_dma_pool_free = goya_cpu_accessible_dma_pool_free, + .hl_dma_unmap_sg = goya_dma_unmap_sg, + .cs_parser = goya_cs_parser, + .asic_dma_map_sg = goya_dma_map_sg, + .get_dma_desc_list_size = goya_get_dma_desc_list_size, + 
.add_end_of_cb_packets = goya_add_end_of_cb_packets,
 	.update_eq_ci = goya_update_eq_ci,
+	.context_switch = goya_context_switch,
+	.restore_phase_topology = goya_restore_phase_topology,
 	.add_device_attr = goya_add_device_attr,
 	.handle_eqe = goya_handle_eqe,
 	.set_pll_profile = goya_set_pll_profile,
@@ -3708,6 +4833,7 @@ static const struct hl_asic_funcs goya_funcs = {
 	.send_heartbeat = goya_send_heartbeat,
 	.enable_clock_gating = goya_init_clock_gating,
 	.disable_clock_gating = goya_disable_clock_gating,
+	.is_device_idle = goya_is_device_idle,
 	.soft_reset_late_init = goya_soft_reset_late_init,
 	.hw_queues_lock = goya_hw_queues_lock,
 	.hw_queues_unlock = goya_hw_queues_unlock,
diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h
index 744e37bbc2a6..9adc7c6ec08b 100644
--- a/drivers/misc/habanalabs/habanalabs.h
+++ b/drivers/misc/habanalabs/habanalabs.h
@@ -16,6 +16,9 @@
 #include
 #include
 #include
+#include
+#include
+#include
 
 #define HL_NAME		"habanalabs"
 
@@ -31,6 +34,11 @@
 
 #define HL_MAX_QUEUES		128
 
+#define HL_MAX_JOBS_PER_CS	64
+
+/* MUST BE POWER OF 2 and larger than 1 */
+#define HL_MAX_PENDING_CS	64
+
 struct hl_device;
 struct hl_fpriv;
 
@@ -61,6 +69,16 @@ struct hw_queue_properties {
 	u8 kmd_only;
 };
 
+/**
+ * enum vm_type_t - virtual memory mapping request information.
+ * @VM_TYPE_USERPTR: mapping of user memory to device virtual address.
+ * @VM_TYPE_PHYS_LIST: mapping of DRAM memory to device virtual address.
+ */
+enum vm_type_t {
+	VM_TYPE_USERPTR,
+	VM_TYPE_PHYS_LIST
+};
+
 /**
  * enum hl_device_hw_state - H/W device state. use this to understand whether
  * to do reset before hw_init or not
@@ -147,6 +165,19 @@ struct asic_fixed_properties {
 	u8 tpc_enabled_mask;
 };
 
+/**
+ * struct hl_dma_fence - wrapper for fence object used by command submissions.
+ * @base_fence: kernel fence object.
+ * @lock: spinlock to protect fence.
+ * @hdev: habanalabs device structure.
+ * @cs_seq: command submission sequence number.
+ */
+struct hl_dma_fence {
+	struct dma_fence base_fence;
+	spinlock_t lock;
+	struct hl_device *hdev;
+	u64 cs_seq;
+};
 
 /*
  * Command Buffers
@@ -175,6 +206,7 @@ struct hl_cb_mgr {
  * @mmap_size: Holds the CB's size that was mmapped.
  * @size: holds the CB's size.
  * @id: the CB's ID.
+ * @cs_cnt: holds number of CS that this CB participates in.
  * @ctx_id: holds the ID of the owner's context.
  * @mmap: true if the CB is currently mmapped to user.
  * @is_pool: true if CB was acquired from the pool, false otherwise.
@@ -189,6 +221,7 @@ struct hl_cb {
 	u32 mmap_size;
 	u32 size;
 	u32 id;
+	u32 cs_cnt;
 	u32 ctx_id;
 	u8 mmap;
 	u8 is_pool;
@@ -313,6 +346,8 @@ enum hl_asic_type {
 	ASIC_INVALID
 };
 
+struct hl_cs_parser;
+
 /**
  * enum hl_pm_mng_profile - power management profile.
  * @PM_AUTO: internal clock is set by KMD.
@@ -372,7 +407,14 @@ enum hl_pll_frequency {
  * @dma_pool_free: free small DMA allocation from pool.
  * @cpu_accessible_dma_pool_alloc: allocate CPU PQ packet from DMA pool.
  * @cpu_accessible_dma_pool_free: free CPU PQ packet from DMA pool.
+ * @hl_dma_unmap_sg: DMA unmap scatter-gather list.
+ * @cs_parser: parse Command Submission.
+ * @asic_dma_map_sg: DMA map scatter-gather list.
+ * @get_dma_desc_list_size: get number of LIN_DMA packets required for CB.
+ * @add_end_of_cb_packets: Add packets to the end of CB, if device requires it.
 * @update_eq_ci: update event queue CI.
+ * @context_switch: called upon ASID context switch.
+ * @restore_phase_topology: clear all SOBs and MONs.
 * @add_device_attr: add ASIC specific device attributes.
* @handle_eqe: handle event queue entry (IRQ) from ArmCP. * @set_pll_profile: change PLL profile (manual/automatic). @@ -380,6 +422,7 @@ enum hl_pll_frequency { * @send_heartbeat: send is-alive packet to ArmCP and verify response. * @enable_clock_gating: enable clock gating for reducing power consumption. * @disable_clock_gating: disable clock for accessing registers on HBW. + * @is_device_idle: return true if device is idle, false otherwise. * @soft_reset_late_init: perform certain actions needed after soft reset. * @hw_queues_lock: acquire H/W queues lock. * @hw_queues_unlock: release H/W queues lock. @@ -419,7 +462,20 @@ struct hl_asic_funcs { size_t size, dma_addr_t *dma_handle); void (*cpu_accessible_dma_pool_free)(struct hl_device *hdev, size_t size, void *vaddr); + void (*hl_dma_unmap_sg)(struct hl_device *hdev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + int (*cs_parser)(struct hl_device *hdev, struct hl_cs_parser *parser); + int (*asic_dma_map_sg)(struct hl_device *hdev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); + u32 (*get_dma_desc_list_size)(struct hl_device *hdev, + struct sg_table *sgt); + void (*add_end_of_cb_packets)(u64 kernel_address, u32 len, u64 cq_addr, + u32 cq_val, u32 msix_num); void (*update_eq_ci)(struct hl_device *hdev, u32 val); + int (*context_switch)(struct hl_device *hdev, u32 asid); + void (*restore_phase_topology)(struct hl_device *hdev); void (*add_device_attr)(struct hl_device *hdev, struct attribute_group *dev_attr_grp); void (*handle_eqe)(struct hl_device *hdev, @@ -430,6 +486,7 @@ struct hl_asic_funcs { int (*send_heartbeat)(struct hl_device *hdev); void (*enable_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); + bool (*is_device_idle)(struct hl_device *hdev); int (*soft_reset_late_init)(struct hl_device *hdev); void (*hw_queues_lock)(struct hl_device *hdev); void (*hw_queues_unlock)(struct hl_device *hdev); @@ -453,12 +510,28 @@ struct hl_asic_funcs { * @hdev: pointer to the device structure. * @refcount: reference counter for the context. Context is released only when * this hits 0l. It is incremented on CS and CS_WAIT. + * @cs_pending: array of DMA fence objects representing pending CS. + * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed + * to user so user could inquire about CS. It is used as + * index to cs_pending array. + * @cs_lock: spinlock to protect cs_sequence. + * @thread_restore_token: token to prevent multiple threads of the same context + * from running the restore phase. Only one thread + * should run it. + * @thread_restore_wait_token: token to prevent the threads that didn't run + * the restore phase from moving to their execution + * phase before the restore phase has finished. * @asid: context's unique address space ID in the device's MMU. */ struct hl_ctx { struct hl_fpriv *hpriv; struct hl_device *hdev; struct kref refcount; + struct dma_fence *cs_pending[HL_MAX_PENDING_CS]; + u64 cs_sequence; + spinlock_t cs_lock; + atomic_t thread_restore_token; + u32 thread_restore_wait_token; u32 asid; }; @@ -473,14 +546,129 @@ struct hl_ctx_mgr { }; + +/* + * COMMAND SUBMISSIONS + */ + +/** + * struct hl_userptr - memory mapping chunk information + * @vm_type: type of the VM. + * @job_node: linked-list node for hanging the object on the Job's list. + * @vec: pointer to the frame vector. + * @sgt: pointer to the scatter-gather table that holds the pages. 
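 *	(filled by hl_pin_host_memory() via sg_alloc_table_from_pages()
 *	once the pages are pinned)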
+ * @dir: for DMA unmapping, the direction must be supplied, so save it. + * @debugfs_list: node in debugfs list of command submissions. + * @addr: user-space virtual pointer to the start of the memory area. + * @size: size of the memory area to pin & map. + * @dma_mapped: true if the SG was mapped to DMA addresses, false otherwise. + */ +struct hl_userptr { + enum vm_type_t vm_type; /* must be first */ + struct list_head job_node; + struct frame_vector *vec; + struct sg_table *sgt; + enum dma_data_direction dir; + struct list_head debugfs_list; + u64 addr; + u32 size; + u8 dma_mapped; +}; + +/** + * struct hl_cs - command submission. + * @jobs_in_queue_cnt: per-queue counter of submitted jobs. + * @ctx: the context this CS belongs to. + * @job_list: list of the CS's jobs in the various queues. + * @job_lock: spinlock for the CS's jobs list. Needed for free_job. + * @refcount: reference counter for usage of the CS. + * @fence: pointer to the fence object of this CS. + * @work_tdr: delayed work node for TDR. + * @mirror_node: node in device mirror list of command submissions. + * @sequence: the sequence number of this CS. + * @submitted: true if CS was submitted to H/W. + * @completed: true if CS was completed by device. + * @timedout: true if the CS timed out. + * @tdr_active: true if TDR was activated for this CS (to prevent + * double TDR activation). + * @aborted: true if CS was aborted due to some device error. + */ +struct hl_cs { + u8 jobs_in_queue_cnt[HL_MAX_QUEUES]; + struct hl_ctx *ctx; + struct list_head job_list; + spinlock_t job_lock; + struct kref refcount; + struct dma_fence *fence; + struct delayed_work work_tdr; + struct list_head mirror_node; + u64 sequence; + u8 submitted; + u8 completed; + u8 timedout; + u8 tdr_active; + u8 aborted; +}; + /** * struct hl_cs_job - command submission job. + * @cs_node: the node to hang on the CS jobs list. + * @cs: the CS this job belongs to. + * @user_cb: the CB we got from the user. + * @patched_cb: in case of patching, this is the internal CB which is submitted on + * the queue instead of the CB we got from the IOCTL. * @finish_work: workqueue object to run when job is completed. + * @userptr_list: linked-list of userptr mappings that belong to this job and + * wait for completion. * @id: the id of this job inside a CS. + * @hw_queue_id: the id of the H/W queue this job is submitted to. + * @user_cb_size: the actual size of the CB we got from the user. + * @job_cb_size: the actual size of the CB that we put on the queue. + * @ext_queue: whether the job is for an external or an internal queue. */ struct hl_cs_job { + struct list_head cs_node; + struct hl_cs *cs; + struct hl_cb *user_cb; + struct hl_cb *patched_cb; struct work_struct finish_work; + struct list_head userptr_list; u32 id; + u32 hw_queue_id; + u32 user_cb_size; + u32 job_cb_size; + u8 ext_queue; +}; + +/** + * struct hl_cs_parser - command submission parser properties. + * @user_cb: the CB we got from the user. + * @patched_cb: in case of patching, this is the internal CB which is submitted on + * the queue instead of the CB we got from the IOCTL. + * @job_userptr_list: linked-list of userptr mappings that belong to the related + * job and wait for completion. + * @cs_sequence: the sequence number of the related CS. + * @ctx_id: the ID of the context the related CS belongs to. + * @hw_queue_id: the id of the H/W queue this job is submitted to. + * @user_cb_size: the actual size of the CB we got from the user. + * @patched_cb_size: the size of the CB after parsing.
+ * @ext_queue: whether the job is for external queue or internal queue. + * @job_id: the id of the related job inside the related CS. + * @use_virt_addr: whether to treat the addresses in the CB as virtual during + * parsing. + */ +struct hl_cs_parser { + struct hl_cb *user_cb; + struct hl_cb *patched_cb; + struct list_head *job_userptr_list; + u64 cs_sequence; + u32 ctx_id; + u32 hw_queue_id; + u32 user_cb_size; + u32 patched_cb_size; + u8 ext_queue; + u8 job_id; + u8 use_virt_addr; }; @@ -497,6 +685,7 @@ struct hl_cs_job { * @ctx_mgr: context manager to handle multiple context for this FD. * @cb_mgr: command buffer manager to handle multiple buffers for this FD. * @refcount: number of related contexts. + * @restore_phase_mutex: lock for context switch and restore phase. */ struct hl_fpriv { struct hl_device *hdev; @@ -506,6 +695,7 @@ struct hl_fpriv { struct hl_ctx_mgr ctx_mgr; struct hl_cb_mgr cb_mgr; struct kref refcount; + struct mutex restore_phase_mutex; }; @@ -577,6 +767,8 @@ struct hl_device_reset_work { * @eq_wq: work queue of event queue for executing work in process context. * @kernel_ctx: KMD context structure. * @kernel_queues: array of hl_hw_queue. + * @hw_queues_mirror_list: CS mirror list for TDR. + * @hw_queues_mirror_lock: protects hw_queues_mirror_list. * @kernel_cb_mgr: command buffer manager for creating/destroying/handling CGs. * @event_queue: event queue for IRQ from ArmCP. * @dma_pool: DMA pool for small allocations. @@ -604,6 +796,7 @@ struct hl_device_reset_work { * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @fd_open_cnt: number of open user processes. + * @timeout_jiffies: device CS timeout value. * @max_power: the max power of the device, as configured by the sysadmin. This * value is saved so in case of hard-reset, KMD will restore this * value and update the F/W after the re-initialization @@ -617,7 +810,10 @@ struct hl_device_reset_work { * @hwmon_initialized: is H/W monitor sensors was initialized. * @hard_reset_pending: is there a hard reset work pending. * @heartbeat: is heartbeat sanity check towards ArmCP enabled. + * @reset_on_lockup: true if a reset should be done in case of stuck CS, false + * otherwise. * @init_done: is the initialization of the device done. + * @mmu_enable: is MMU enabled. */ struct hl_device { struct pci_dev *pdev; @@ -634,6 +830,8 @@ struct hl_device { struct workqueue_struct *eq_wq; struct hl_ctx *kernel_ctx; struct hl_hw_queue *kernel_queues; + struct list_head hw_queues_mirror_list; + spinlock_t hw_queues_mirror_lock; struct hl_cb_mgr kernel_cb_mgr; struct hl_eq event_queue; struct dma_pool *dma_pool; @@ -661,6 +859,7 @@ struct hl_device { atomic_t in_reset; atomic_t curr_pll_profile; atomic_t fd_open_cnt; + u64 timeout_jiffies; u64 max_power; u32 major; u32 high_pll; @@ -672,9 +871,11 @@ struct hl_device { u8 hwmon_initialized; u8 hard_reset_pending; u8 heartbeat; + u8 reset_on_lockup; u8 init_done; /* Parameters for bring-up */ + u8 mmu_enable; u8 cpu_enable; u8 reset_pcilink; u8 cpu_queues_enable; @@ -712,6 +913,58 @@ struct hl_ioctl_desc { * Kernel module functions that can be accessed by entire module */ +/** + * hl_mem_area_inside_range() - Checks whether address+size are inside a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. 
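 *	(compared against address + size, so an area that ends exactly
 *	at range_end_address still counts as inside)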
+ * + * Return: true if the area is inside the valid range, false otherwise. + */ +static inline bool hl_mem_area_inside_range(u64 address, u32 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (end_address <= range_end_address) && + (end_address > address)) + return true; + + return false; +} + +/** + * hl_mem_area_crosses_range() - Checks whether address+size crossing a range. + * @address: The start address of the area we want to validate. + * @size: The size in bytes of the area we want to validate. + * @range_start_address: The start address of the valid range. + * @range_end_address: The end address of the valid range. + * + * Return: true if the area overlaps part or all of the valid range, + * false otherwise. + */ +static inline bool hl_mem_area_crosses_range(u64 address, u32 size, + u64 range_start_address, u64 range_end_address) +{ + u64 end_address = address + size; + + if ((address >= range_start_address) && + (address < range_end_address)) + return true; + + if ((end_address >= range_start_address) && + (end_address < range_end_address)) + return true; + + if ((address < range_start_address) && + (end_address >= range_end_address)) + return true; + + return false; +} + int hl_device_open(struct inode *inode, struct file *filp); bool hl_device_disabled_or_in_reset(struct hl_device *hdev); int create_hdev(struct hl_device **dev, struct pci_dev *pdev, @@ -725,8 +978,10 @@ int hl_hw_queues_create(struct hl_device *hdev); void hl_hw_queues_destroy(struct hl_device *hdev); int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, u32 cb_size, u64 cb_ptr); +int hl_hw_queue_schedule_cs(struct hl_cs *cs); u32 hl_hw_queue_add_ptr(u32 ptr, u16 val); void hl_hw_queue_inc_ci_kernel(struct hl_device *hdev, u32 hw_queue_id); +void hl_int_hw_queue_update_ci(struct hl_cs *cs); void hl_hw_queue_reset(struct hl_device *hdev, bool hard_reset); #define hl_queue_inc_ptr(p) hl_hw_queue_add_ptr(p, 1) @@ -740,6 +995,8 @@ void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q); void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q); irqreturn_t hl_irq_handler_cq(int irq, void *arg); irqreturn_t hl_irq_handler_eq(int irq, void *arg); +u32 hl_cq_inc_ptr(u32 ptr); + int hl_asid_init(struct hl_device *hdev); void hl_asid_fini(struct hl_device *hdev); unsigned long hl_asid_alloc(struct hl_device *hdev); @@ -748,9 +1005,13 @@ void hl_asid_free(struct hl_device *hdev, unsigned long asid); int hl_ctx_create(struct hl_device *hdev, struct hl_fpriv *hpriv); void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx); int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx); +void hl_ctx_do_release(struct kref *ref); +void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx); int hl_ctx_put(struct hl_ctx *ctx); +struct dma_fence *hl_ctx_get_fence(struct hl_ctx *ctx, u64 seq); void hl_ctx_mgr_init(struct hl_ctx_mgr *mgr); void hl_ctx_mgr_fini(struct hl_device *hdev, struct hl_ctx_mgr *mgr); + int hl_device_init(struct hl_device *hdev, struct class *hclass); void hl_device_fini(struct hl_device *hdev); int hl_device_suspend(struct hl_device *hdev); @@ -782,8 +1043,20 @@ struct hl_cb *hl_cb_kernel_create(struct hl_device *hdev, u32 cb_size); int hl_cb_pool_init(struct hl_device *hdev); int hl_cb_pool_fini(struct hl_device *hdev); +void hl_cs_rollback_all(struct hl_device *hdev); +struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue); + void 
goya_set_asic_funcs(struct hl_device *hdev); +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, + struct hl_userptr *userptr); +int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); +void hl_userptr_delete_list(struct hl_device *hdev, + struct list_head *userptr_list); +bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size, + struct list_head *userptr_list, + struct hl_userptr **userptr); + long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq); long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); @@ -799,5 +1072,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value); /* IOCTLs */ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data); #endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index b0bf77af1e40..77a1cc85e530 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -24,6 +24,17 @@ static struct class *hl_class; DEFINE_IDR(hl_devs_idr); DEFINE_MUTEX(hl_devs_idr_lock); +static int timeout_locked = 5; +static int reset_on_lockup = 1; + +module_param(timeout_locked, int, 0444); +MODULE_PARM_DESC(timeout_locked, + "Device lockup timeout in seconds (0 = disabled, default 5s)"); + +module_param(reset_on_lockup, int, 0444); +MODULE_PARM_DESC(reset_on_lockup, + "Do device reset on lockup (0 = no, 1 = yes, default yes)"); + #define PCI_VENDOR_ID_HABANALABS 0x1da3 #define PCI_IDS_GOYA 0x0001 @@ -113,6 +124,7 @@ int hl_device_open(struct inode *inode, struct file *filp) hpriv->hdev = hdev; filp->private_data = hpriv; hpriv->filp = filp; + mutex_init(&hpriv->restore_phase_mutex); kref_init(&hpriv->refcount); nonseekable_open(inode, filp); @@ -140,6 +152,7 @@ out_err: filp->private_data = NULL; hl_ctx_mgr_fini(hpriv->hdev, &hpriv->ctx_mgr); hl_cb_mgr_fini(hpriv->hdev, &hpriv->cb_mgr); + mutex_destroy(&hpriv->restore_phase_mutex); kfree(hpriv); close_device: @@ -172,8 +185,10 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, return -ENOMEM; hdev->major = hl_major; + hdev->reset_on_lockup = reset_on_lockup; /* Parameters for bring-up - set them to defaults */ + hdev->mmu_enable = 0; hdev->cpu_enable = 1; hdev->reset_pcilink = 0; hdev->cpu_queues_enable = 1; @@ -193,6 +208,11 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, if (!hdev->cpu_queues_enable) hdev->heartbeat = 0; + if (timeout_locked) + hdev->timeout_jiffies = msecs_to_jiffies(timeout_locked * 1000); + else + hdev->timeout_jiffies = MAX_SCHEDULE_TIMEOUT; + hdev->disabled = true; hdev->pdev = pdev; /* can be NULL in case of simulator device */ diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index e56a51f6bab6..481db1a5e97e 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -16,7 +16,9 @@ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func} static const struct hl_ioctl_desc hl_ioctls[] = { - HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl) + HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl), + HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), + HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl) }; #define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls) diff --git 
a/drivers/misc/habanalabs/hw_queue.c b/drivers/misc/habanalabs/hw_queue.c index 2ec43f36cdb8..68dfda59a875 100644 --- a/drivers/misc/habanalabs/hw_queue.c +++ b/drivers/misc/habanalabs/hw_queue.c @@ -34,6 +34,29 @@ static inline int queue_free_slots(struct hl_hw_queue *q, u32 queue_len) return (abs(delta) - queue_len); } +void hl_int_hw_queue_update_ci(struct hl_cs *cs) +{ + struct hl_device *hdev = cs->ctx->hdev; + struct hl_hw_queue *q; + int i; + + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hdev->disabled) + goto out; + + q = &hdev->kernel_queues[0]; + for (i = 0 ; i < HL_MAX_QUEUES ; i++, q++) { + if (q->queue_type == QUEUE_TYPE_INT) { + q->ci += cs->jobs_in_queue_cnt[i]; + q->ci &= ((q->int_queue_len << 1) - 1); + } + } + +out: + hdev->asic_funcs->hw_queues_unlock(hdev); +} + /* * ext_queue_submit_bd - Submit a buffer descriptor to an external queue * @@ -119,6 +142,37 @@ static int ext_queue_sanity_checks(struct hl_device *hdev, return 0; } +/* + * int_queue_sanity_checks - perform some sanity checks on internal queue + * + * @hdev : pointer to hl_device structure + * @q : pointer to hl_hw_queue structure + * @num_of_entries : how many entries to check for space + * + * H/W queues spinlock should be taken before calling this function + * + * Perform the following: + * - Make sure we have enough space in the h/w queue + * + */ +static int int_queue_sanity_checks(struct hl_device *hdev, + struct hl_hw_queue *q, + int num_of_entries) +{ + int free_slots_cnt; + + /* Check we have enough space in the queue */ + free_slots_cnt = queue_free_slots(q, q->int_queue_len); + + if (free_slots_cnt < num_of_entries) { + dev_dbg(hdev->dev, "Queue %d doesn't have room for %d CBs\n", + q->hw_queue_id, num_of_entries); + return -EAGAIN; + } + + return 0; +} + /* * hl_hw_queue_send_cb_no_cmpl - send a single CB (not a JOB) without completion * @@ -165,6 +219,184 @@ out: return rc; } +/* + * ext_hw_queue_schedule_job - submit an JOB to an external queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void ext_hw_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_cq_entry cq_pkt; + struct hl_cq *cq; + u64 cq_addr; + struct hl_cb *cb; + u32 ctl; + u32 len; + u64 ptr; + + /* + * Update the JOB ID inside the BD CTL so the device would know what + * to write in the completion queue + */ + ctl = ((q->pi << BD_CTL_SHADOW_INDEX_SHIFT) & BD_CTL_SHADOW_INDEX_MASK); + + cb = job->patched_cb; + len = job->job_cb_size; + ptr = cb->bus_address; + + cq_pkt.data = (q->pi << CQ_ENTRY_SHADOW_INDEX_SHIFT) + & CQ_ENTRY_SHADOW_INDEX_MASK; + cq_pkt.data |= 1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT; + cq_pkt.data |= 1 << CQ_ENTRY_READY_SHIFT; + + /* + * No need to protect pi_offset because scheduling to the + * H/W queues is done under the scheduler mutex + * + * No need to check if CQ is full because it was already + * checked in hl_queue_sanity_checks + */ + cq = &hdev->completion_queue[q->hw_queue_id]; + cq_addr = cq->bus_address + + hdev->asic_prop.host_phys_base_address; + cq_addr += cq->pi * sizeof(struct hl_cq_entry); + + hdev->asic_funcs->add_end_of_cb_packets(cb->kernel_address, len, + cq_addr, cq_pkt.data, q->hw_queue_id); + + q->shadow_queue[hl_pi_2_offset(q->pi)] = job; + + cq->pi = hl_cq_inc_ptr(cq->pi); + + ext_queue_submit_bd(hdev, q, ctl, len, ptr); +} + +/* + * 
int_hw_queue_schedule_job - submit an JOB to an internal queue + * + * @job: pointer to the job that needs to be submitted to the queue + * + * This function must be called when the scheduler mutex is taken + * + */ +static void int_hw_queue_schedule_job(struct hl_cs_job *job) +{ + struct hl_device *hdev = job->cs->ctx->hdev; + struct hl_hw_queue *q = &hdev->kernel_queues[job->hw_queue_id]; + struct hl_bd bd; + u64 *pi, *pbd = (u64 *) &bd; + + bd.ctl = 0; + bd.len = job->job_cb_size; + bd.ptr = (u64) (uintptr_t) job->user_cb; + + pi = (u64 *) (uintptr_t) (q->kernel_address + + ((q->pi & (q->int_queue_len - 1)) * sizeof(bd))); + + pi[0] = pbd[0]; + pi[1] = pbd[1]; + + q->pi++; + q->pi &= ((q->int_queue_len << 1) - 1); + + /* Flush PQ entry write. Relevant only for specific ASICs */ + hdev->asic_funcs->flush_pq_write(hdev, pi, pbd[0]); + + hdev->asic_funcs->ring_doorbell(hdev, q->hw_queue_id, q->pi); +} + +/* + * hl_hw_queue_schedule_cs - schedule a command submission + * + * @job : pointer to the CS + * + */ +int hl_hw_queue_schedule_cs(struct hl_cs *cs) +{ + struct hl_device *hdev = cs->ctx->hdev; + struct hl_cs_job *job, *tmp; + struct hl_hw_queue *q; + int rc = 0, i, cq_cnt; + + hdev->asic_funcs->hw_queues_lock(hdev); + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err(hdev->dev, + "device is disabled or in reset, CS rejected!\n"); + rc = -EPERM; + goto out; + } + + q = &hdev->kernel_queues[0]; + /* This loop assumes all external queues are consecutive */ + for (i = 0, cq_cnt = 0 ; i < HL_MAX_QUEUES ; i++, q++) { + if (q->queue_type == QUEUE_TYPE_EXT) { + if (cs->jobs_in_queue_cnt[i]) { + rc = ext_queue_sanity_checks(hdev, q, + cs->jobs_in_queue_cnt[i], true); + if (rc) + goto unroll_cq_resv; + cq_cnt++; + } + } else if (q->queue_type == QUEUE_TYPE_INT) { + if (cs->jobs_in_queue_cnt[i]) { + rc = int_queue_sanity_checks(hdev, q, + cs->jobs_in_queue_cnt[i]); + if (rc) + goto unroll_cq_resv; + } + } + } + + spin_lock(&hdev->hw_queues_mirror_lock); + list_add_tail(&cs->mirror_node, &hdev->hw_queues_mirror_list); + + /* Queue TDR if the CS is the first entry and if timeout is wanted */ + if ((hdev->timeout_jiffies != MAX_SCHEDULE_TIMEOUT) && + (list_first_entry(&hdev->hw_queues_mirror_list, + struct hl_cs, mirror_node) == cs)) { + cs->tdr_active = true; + schedule_delayed_work(&cs->work_tdr, hdev->timeout_jiffies); + spin_unlock(&hdev->hw_queues_mirror_lock); + } else { + spin_unlock(&hdev->hw_queues_mirror_lock); + } + + list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) { + if (job->ext_queue) + ext_hw_queue_schedule_job(job); + else + int_hw_queue_schedule_job(job); + } + + cs->submitted = true; + + goto out; + +unroll_cq_resv: + /* This loop assumes all external queues are consecutive */ + q = &hdev->kernel_queues[0]; + for (i = 0 ; (i < HL_MAX_QUEUES) && (cq_cnt > 0) ; i++, q++) { + if ((q->queue_type == QUEUE_TYPE_EXT) && + (cs->jobs_in_queue_cnt[i])) { + atomic_t *free_slots = + &hdev->completion_queue[i].free_slots_cnt; + atomic_add(cs->jobs_in_queue_cnt[i], free_slots); + cq_cnt--; + } + } + +out: + hdev->asic_funcs->hw_queues_unlock(hdev); + + return rc; +} + /* * hl_hw_queue_inc_ci_kernel - increment ci for kernel's queue * diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c new file mode 100644 index 000000000000..ad14376a1c25 --- /dev/null +++ b/drivers/misc/habanalabs/memory.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" + +#include +#include + +/* + * hl_pin_host_memory - pins a chunk of host memory + * + * @hdev : pointer to the habanalabs device structure + * @addr : the user-space virtual address of the memory area + * @size : the size of the memory area + * @userptr : pointer to hl_userptr structure + * + * This function does the following: + * - Pins the physical pages + * - Create a SG list from those pages + */ +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, + struct hl_userptr *userptr) +{ + u64 start, end; + u32 npages, offset; + int rc; + + if (!size) { + dev_err(hdev->dev, "size to pin is invalid - %d\n", + size); + return -EINVAL; + } + + if (!access_ok((void __user *) (uintptr_t) addr, size)) { + dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", + addr); + return -EFAULT; + } + + /* + * If the combination of the address and size requested for this memory + * region causes an integer overflow, return error. + */ + if (((addr + size) < addr) || + PAGE_ALIGN(addr + size) < (addr + size)) { + dev_err(hdev->dev, + "user pointer 0x%llx + %u causes integer overflow\n", + addr, size); + return -EINVAL; + } + + start = addr & PAGE_MASK; + offset = addr & ~PAGE_MASK; + end = PAGE_ALIGN(addr + size); + npages = (end - start) >> PAGE_SHIFT; + + userptr->size = size; + userptr->addr = addr; + userptr->dma_mapped = false; + INIT_LIST_HEAD(&userptr->job_node); + + userptr->vec = frame_vector_create(npages); + if (!userptr->vec) { + dev_err(hdev->dev, "Failed to create frame vector\n"); + return -ENOMEM; + } + + rc = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE, + userptr->vec); + + if (rc != npages) { + dev_err(hdev->dev, + "Failed to map host memory, user ptr probably wrong\n"); + if (rc < 0) + goto destroy_framevec; + rc = -EFAULT; + goto put_framevec; + } + + if (frame_vector_to_pages(userptr->vec) < 0) { + dev_err(hdev->dev, + "Failed to translate frame vector to pages\n"); + rc = -EFAULT; + goto put_framevec; + } + + userptr->sgt = kzalloc(sizeof(*userptr->sgt), GFP_ATOMIC); + if (!userptr->sgt) { + rc = -ENOMEM; + goto put_framevec; + } + + rc = sg_alloc_table_from_pages(userptr->sgt, + frame_vector_pages(userptr->vec), + npages, offset, size, GFP_ATOMIC); + if (rc < 0) { + dev_err(hdev->dev, "failed to create SG table from pages\n"); + goto free_sgt; + } + + return 0; + +free_sgt: + kfree(userptr->sgt); +put_framevec: + put_vaddr_frames(userptr->vec); +destroy_framevec: + frame_vector_destroy(userptr->vec); + return rc; +} + +/* + * hl_unpin_host_memory - unpins a chunk of host memory + * + * @hdev : pointer to the habanalabs device structure + * @userptr : pointer to hl_userptr structure + * + * This function does the following: + * - Unpins the physical pages related to the host memory + * - Free the SG list + */ +int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr) +{ + struct page **pages; + + if (userptr->dma_mapped) + hdev->asic_funcs->hl_dma_unmap_sg(hdev, + userptr->sgt->sgl, + userptr->sgt->nents, + userptr->dir); + + pages = frame_vector_pages(userptr->vec); + if (!IS_ERR(pages)) { + int i; + + for (i = 0; i < frame_vector_count(userptr->vec); i++) + set_page_dirty_lock(pages[i]); + } + put_vaddr_frames(userptr->vec); + frame_vector_destroy(userptr->vec); + + list_del(&userptr->job_node); + + sg_free_table(userptr->sgt); + kfree(userptr->sgt); + + return 0; +} + +/* + * hl_userptr_delete_list - clear userptr list + * + * @hdev : pointer to the habanalabs device structure + * @userptr_list : pointer 
to the list to clear + * + * This function does the following: + * - Iterates over the list and unpins the host memory and frees the userptr + * structure. + */ +void hl_userptr_delete_list(struct hl_device *hdev, + struct list_head *userptr_list) +{ + struct hl_userptr *userptr, *tmp; + + list_for_each_entry_safe(userptr, tmp, userptr_list, job_node) { + hl_unpin_host_memory(hdev, userptr); + kfree(userptr); + } + + INIT_LIST_HEAD(userptr_list); +} + +/* + * hl_userptr_is_pinned - returns whether the given userptr is pinned + * + * @hdev : pointer to the habanalabs device structure + * @userptr_list : pointer to the list to clear + * @userptr : pointer to userptr to check + * + * This function does the following: + * - Iterates over the list and checks if the given userptr is in it, means is + * pinned. If so, returns true, otherwise returns false. + */ +bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, + u32 size, struct list_head *userptr_list, + struct hl_userptr **userptr) +{ + list_for_each_entry((*userptr), userptr_list, job_node) { + if ((addr == (*userptr)->addr) && (size == (*userptr)->size)) + return true; + } + + return false; +} diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 756266cf0416..fba49417f607 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -73,6 +73,95 @@ union hl_cb_args { struct hl_cb_out out; }; +/* + * This structure size must always be fixed to 64-bytes for backward + * compatibility + */ +struct hl_cs_chunk { + /* + * For external queue, this represents a Handle of CB on the Host + * For internal queue, this represents an SRAM or DRAM address of the + * internal CB + */ + __u64 cb_handle; + /* Index of queue to put the CB on */ + __u32 queue_index; + /* + * Size of command buffer with valid packets + * Can be smaller then actual CB size + */ + __u32 cb_size; + /* HL_CS_CHUNK_FLAGS_* */ + __u32 cs_chunk_flags; + /* Align structure to 64 bytes */ + __u32 pad[11]; +}; + +#define HL_CS_FLAGS_FORCE_RESTORE 0x1 + +#define HL_CS_STATUS_SUCCESS 0 + +struct hl_cs_in { + /* this holds address of array of hl_cs_chunk for restore phase */ + __u64 chunks_restore; + /* this holds address of array of hl_cs_chunk for execution phase */ + __u64 chunks_execute; + /* this holds address of array of hl_cs_chunk for store phase - + * Currently not in use + */ + __u64 chunks_store; + /* Number of chunks in restore phase array */ + __u32 num_chunks_restore; + /* Number of chunks in execution array */ + __u32 num_chunks_execute; + /* Number of chunks in restore phase array - Currently not in use */ + __u32 num_chunks_store; + /* HL_CS_FLAGS_* */ + __u32 cs_flags; + /* Context ID - Currently not in use */ + __u32 ctx_id; +}; + +struct hl_cs_out { + /* this holds the sequence number of the CS to pass to wait ioctl */ + __u64 seq; + /* HL_CS_STATUS_* */ + __u32 status; + __u32 pad; +}; + +union hl_cs_args { + struct hl_cs_in in; + struct hl_cs_out out; +}; + +struct hl_wait_cs_in { + /* Command submission sequence number */ + __u64 seq; + /* Absolute timeout to wait in microseconds */ + __u64 timeout_us; + /* Context ID - Currently not in use */ + __u32 ctx_id; + __u32 pad; +}; + +#define HL_WAIT_CS_STATUS_COMPLETED 0 +#define HL_WAIT_CS_STATUS_BUSY 1 +#define HL_WAIT_CS_STATUS_TIMEDOUT 2 +#define HL_WAIT_CS_STATUS_ABORTED 3 +#define HL_WAIT_CS_STATUS_INTERRUPTED 4 + +struct hl_wait_cs_out { + /* HL_WAIT_CS_STATUS_* */ + __u32 status; + __u32 pad; +}; + +union hl_wait_cs_args { + struct hl_wait_cs_in in; + 
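/*
 * A minimal user-space sketch of the submit/wait flow these unions
 * serve; fd and the chunk array are assumed to already exist (see the
 * IOCTL descriptions below):
 *
 *	union hl_cs_args cs = { .in = {
 *		.chunks_execute = (__u64) (uintptr_t) chunks,
 *		.num_chunks_execute = 1 } };
 *	ioctl(fd, HL_IOCTL_CS, &cs);
 *
 *	union hl_wait_cs_args wait = { .in = {
 *		.seq = cs.out.seq,
 *		.timeout_us = 1000000 } };
 *	ioctl(fd, HL_IOCTL_WAIT_CS, &wait);
 */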
struct hl_wait_cs_out out; +}; + /* * Command Buffer * - Request a Command Buffer @@ -89,7 +178,74 @@ union hl_cb_args { #define HL_IOCTL_CB \ _IOWR('H', 0x02, union hl_cb_args) +/* + * Command Submission + * + * To submit work to the device, the user needs to call this IOCTL with a set + * of JOBS. That set of JOBS constitutes a CS object. + * Each JOB will be enqueued on a specific queue, according to the user's input. + * There can be more than one JOB per queue. + * + * There are two types of queues - external and internal. External queues + * are DMA queues which transfer data from/to the Host. All other queues are + * internal. The driver will get completion notifications from the device only + * on JOBS which are enqueued in the external queues. + * + * This IOCTL is asynchronous in regard to the actual execution of the CS. This + * means it returns immediately after ALL the JOBS were enqueued on their + * relevant queues. Therefore, the user mustn't assume the CS has been completed + * or has even started to execute. + * + * Upon successful enqueue, the IOCTL returns an opaque handle which the user + * can use with the "Wait for CS" IOCTL to check whether the handle's CS + * external JOBS have been completed. Note that if the CS has internal JOBS + * which can execute AFTER the external JOBS have finished, the driver might + * report that the CS has finished executing BEFORE the internal JOBS have + * actually finished executing. + * + * The CS IOCTL will receive three sets of JOBS. One set is for the "restore" + * phase, a second set is for the "execution" phase and a third set is for the + * "store" phase. The JOBS on the "restore" phase are enqueued only after + * context-switch (or if it's the first CS for this context). The user can also + * order the driver to run the "restore" phase explicitly. + * + */ +#define HL_IOCTL_CS \ + _IOWR('H', 0x03, union hl_cs_args) + +/* + * Wait for Command Submission + * + * The user can call this IOCTL with a handle it received from the CS IOCTL + * to wait until the handle's CS has finished executing. The user will wait + * inside the kernel until the CS has finished or until the user-requested + * timeout has expired. + * + * The return value of the IOCTL is a standard Linux error code. The possible + * values are: + * + * EINTR - Kernel waiting has been interrupted, e.g. due to an OS signal + * that the user process received + * ETIMEDOUT - The CS has caused a timeout on the device + * EIO - The CS was aborted (usually because the device was reset) + * ENODEV - The device wants to do a hard reset (so the user needs to close + * the FD) + * + * The driver also returns a custom define inside the IOCTL which can be: + * + * HL_WAIT_CS_STATUS_COMPLETED - The CS has been completed successfully (0) + * HL_WAIT_CS_STATUS_BUSY - The CS is still executing (0) + * HL_WAIT_CS_STATUS_TIMEDOUT - The CS has caused a timeout on the device + * (ETIMEDOUT) + * HL_WAIT_CS_STATUS_ABORTED - The CS was aborted, usually because the + * device was reset (EIO) + * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR) + * + */ + +#define HL_IOCTL_WAIT_CS \ + _IOWR('H', 0x04, union hl_wait_cs_args) + #define HL_COMMAND_START 0x02 -#define HL_COMMAND_END 0x03 +#define HL_COMMAND_END 0x05 #endif /* HABANALABS_H_ */ -- cgit v1.2.3-71-gd317 From 0feaf86d4e69507ab9b2af7dcc63a6886352d5db Mon Sep 17 00:00:00 2001 From: Omer Shpigelman Date: Sat, 16 Feb 2019 00:39:22 +0200 Subject: habanalabs: add virtual memory and MMU modules This patch adds the Virtual Memory and MMU modules.
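The main entry points, exactly as declared in habanalabs.h by this patch, give a feel for the interface:

	int hl_vm_init(struct hl_device *hdev);
	int hl_vm_ctx_init(struct hl_ctx *ctx);
	int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size);
	int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size);
	int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data);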
Goya has an internal MMU which provides process isolation on the internal DDR. The internal MMU also performs translations for transactions that go from Goya to the Host. The driver is responsible for allocating and freeing memory on the DDR upon user request. It also provides an interface to map and unmap DDR and Host memory to the device address space. The MMU in Goya supports 3-level and 4-level page tables. With 3-level, the size of each page is 2MB, while with 4-level the size of each page is 4KB. In the DDR, the physical pages are always 2MB. Reviewed-by: Mike Rapoport Signed-off-by: Omer Shpigelman Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/Makefile | 2 +- drivers/misc/habanalabs/context.c | 19 +- drivers/misc/habanalabs/device.c | 20 +- drivers/misc/habanalabs/goya/goya.c | 395 +++++ drivers/misc/habanalabs/habanalabs.h | 189 ++- drivers/misc/habanalabs/habanalabs_drv.c | 2 +- drivers/misc/habanalabs/habanalabs_ioctl.c | 3 +- .../habanalabs/include/hw_ip/mmu/mmu_general.h | 46 + .../misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h | 15 + drivers/misc/habanalabs/memory.c | 1517 ++++++++++++++++++++ drivers/misc/habanalabs/mmu.c | 691 +++++++++ include/uapi/misc/habanalabs.h | 122 +- 12 files changed, 3013 insertions(+), 8 deletions(-) create mode 100644 drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h create mode 100644 drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h create mode 100644 drivers/misc/habanalabs/mmu.c (limited to 'include') diff --git a/drivers/misc/habanalabs/Makefile b/drivers/misc/habanalabs/Makefile index d2fd0e18b1eb..fd46f8b48bab 100644 --- a/drivers/misc/habanalabs/Makefile +++ b/drivers/misc/habanalabs/Makefile @@ -6,7 +6,7 @@ obj-m := habanalabs.o habanalabs-y := habanalabs_drv.o device.o context.o asid.o habanalabs_ioctl.o \ command_buffer.o hw_queue.o irq.o sysfs.o hwmon.o memory.o \ - command_submission.o + command_submission.o mmu.o include $(src)/goya/Makefile habanalabs-y += $(HL_GOYA_FILES) diff --git a/drivers/misc/habanalabs/context.c b/drivers/misc/habanalabs/context.c index c3854714b46c..619ace1c4ef7 100644 --- a/drivers/misc/habanalabs/context.c +++ b/drivers/misc/habanalabs/context.c @@ -25,8 +25,10 @@ static void hl_ctx_fini(struct hl_ctx *ctx) for (i = 0 ; i < HL_MAX_PENDING_CS ; i++) dma_fence_put(ctx->cs_pending[i]); - if (ctx->asid != HL_KERNEL_ASID_ID) + if (ctx->asid != HL_KERNEL_ASID_ID) { + hl_vm_ctx_fini(ctx); hl_asid_free(hdev, ctx->asid); + } } void hl_ctx_do_release(struct kref *ref) @@ -96,6 +98,8 @@ void hl_ctx_free(struct hl_device *hdev, struct hl_ctx *ctx) int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) { + int rc = 0; + ctx->hdev = hdev; kref_init(&ctx->refcount); @@ -113,9 +117,22 @@ int hl_ctx_init(struct hl_device *hdev, struct hl_ctx *ctx, bool is_kernel_ctx) dev_err(hdev->dev, "No free ASID, failed to create context\n"); return -ENOMEM; } + + rc = hl_vm_ctx_init(ctx); + if (rc) { + dev_err(hdev->dev, "Failed to init mem ctx module\n"); + rc = -ENOMEM; + goto mem_ctx_err; + } } return 0; + +mem_ctx_err: + if (ctx->asid != HL_KERNEL_ASID_ID) + hl_asid_free(hdev, ctx->asid); + + return rc; } void hl_ctx_get(struct hl_device *hdev, struct hl_ctx *ctx) diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c index cc5f068df597..d0929022655b 100644 --- a/drivers/misc/habanalabs/device.c +++ b/drivers/misc/habanalabs/device.c @@ -615,8 +615,10 @@ again: /* Reset the H/W. 
It will be in idle state after this returns */ hdev->asic_funcs->hw_fini(hdev, hard_reset); - if (hard_reset) + if (hard_reset) { + hl_vm_fini(hdev); hl_eq_reset(hdev, &hdev->event_queue); + } /* Re-initialize PI,CI to 0 in all queues (hw queue, cq) */ hl_hw_queue_reset(hdev, hard_reset); @@ -677,6 +679,13 @@ again: goto out_err; } + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, + "Failed to init memory module after hard reset\n"); + goto out_err; + } + hl_set_max_power(hdev, hdev->max_power); hdev->hard_reset_pending = false; @@ -861,6 +870,13 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass) hdev->asic_name, hdev->asic_prop.dram_size / 1024 / 1024 / 1024); + rc = hl_vm_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to initialize memory module\n"); + rc = 0; + goto out_disabled; + } + /* * hl_hwmon_init must be called after device_late_init, because only * there we get the information from the device about which @@ -977,6 +993,8 @@ void hl_device_fini(struct hl_device *hdev) /* Reset the H/W. It will be in idle state after this returns */ hdev->asic_funcs->hw_fini(hdev, true); + hl_vm_fini(hdev); + hl_eq_fini(hdev, &hdev->event_queue); for (i = 0 ; i < hdev->asic_prop.completion_queues_count ; i++) diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index e3878fd7dc94..89b82b989966 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -6,6 +6,8 @@ */ #include "goyaP.h" +#include "include/hw_ip/mmu/mmu_general.h" +#include "include/hw_ip/mmu/mmu_v1_0.h" #include "include/goya/asic_reg/goya_masks.h" #include @@ -80,6 +82,7 @@ #define GOYA_PLDM_RESET_WAIT_MSEC 1000 /* 1s */ #define GOYA_CPU_TIMEOUT_USEC 10000000 /* 10s */ #define GOYA_TEST_QUEUE_WAIT_USEC 100000 /* 100ms */ +#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100) #define GOYA_QMAN0_FENCE_VAL 0xD169B243 @@ -131,6 +134,70 @@ static const char *goya_axi_name[GOYA_MAX_INITIATORS] = { "MMU" }; +static u64 goya_mmu_regs[GOYA_MMU_REGS_NUM] = { + mmDMA_QM_0_GLBL_NON_SECURE_PROPS, + mmDMA_QM_1_GLBL_NON_SECURE_PROPS, + mmDMA_QM_2_GLBL_NON_SECURE_PROPS, + mmDMA_QM_3_GLBL_NON_SECURE_PROPS, + mmDMA_QM_4_GLBL_NON_SECURE_PROPS, + mmTPC0_QM_GLBL_SECURE_PROPS, + mmTPC0_QM_GLBL_NON_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_SECURE_PROPS, + mmTPC0_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC0_CFG_ARUSER, + mmTPC0_CFG_AWUSER, + mmTPC1_QM_GLBL_SECURE_PROPS, + mmTPC1_QM_GLBL_NON_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_SECURE_PROPS, + mmTPC1_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC1_CFG_ARUSER, + mmTPC1_CFG_AWUSER, + mmTPC2_QM_GLBL_SECURE_PROPS, + mmTPC2_QM_GLBL_NON_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_SECURE_PROPS, + mmTPC2_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC2_CFG_ARUSER, + mmTPC2_CFG_AWUSER, + mmTPC3_QM_GLBL_SECURE_PROPS, + mmTPC3_QM_GLBL_NON_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_SECURE_PROPS, + mmTPC3_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC3_CFG_ARUSER, + mmTPC3_CFG_AWUSER, + mmTPC4_QM_GLBL_SECURE_PROPS, + mmTPC4_QM_GLBL_NON_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_SECURE_PROPS, + mmTPC4_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC4_CFG_ARUSER, + mmTPC4_CFG_AWUSER, + mmTPC5_QM_GLBL_SECURE_PROPS, + mmTPC5_QM_GLBL_NON_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_SECURE_PROPS, + mmTPC5_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC5_CFG_ARUSER, + mmTPC5_CFG_AWUSER, + mmTPC6_QM_GLBL_SECURE_PROPS, + mmTPC6_QM_GLBL_NON_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_SECURE_PROPS, + mmTPC6_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC6_CFG_ARUSER, + mmTPC6_CFG_AWUSER, + mmTPC7_QM_GLBL_SECURE_PROPS, + mmTPC7_QM_GLBL_NON_SECURE_PROPS, + 
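/*
 * Every register in this table carries an engine's MMU-bypass and ASID
 * configuration; goya_mmu_prepare() below clears and re-sets the low
 * bits of each entry on context switch:
 *
 *	WREG32_AND(goya_mmu_regs[i], ~0x7FF);
 *	WREG32_OR(goya_mmu_regs[i], asid);
 */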
mmTPC7_CMDQ_GLBL_SECURE_PROPS, + mmTPC7_CMDQ_GLBL_NON_SECURE_PROPS, + mmTPC7_CFG_ARUSER, + mmTPC7_CFG_AWUSER, + mmMME_QM_GLBL_SECURE_PROPS, + mmMME_QM_GLBL_NON_SECURE_PROPS, + mmMME_CMDQ_GLBL_SECURE_PROPS, + mmMME_CMDQ_GLBL_NON_SECURE_PROPS, + mmMME_SBA_CONTROL_DATA, + mmMME_SBB_CONTROL_DATA, + mmMME_SBC_CONTROL_DATA, + mmMME_WBC_CONTROL_DATA +}; + #define GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE 121 static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = { @@ -258,6 +325,10 @@ static u32 goya_non_fatal_events[GOYA_ASYC_EVENT_GROUP_NON_FATAL_SIZE] = { }; static int goya_armcp_info_get(struct hl_device *hdev); +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid); +static int goya_mmu_clear_pgt_range(struct hl_device *hdev); +static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr); static void goya_get_fixed_properties(struct hl_device *hdev) { @@ -296,6 +367,16 @@ static void goya_get_fixed_properties(struct hl_device *hdev) prop->sram_user_base_address = prop->sram_base_address + SRAM_USER_BASE_OFFSET; + prop->mmu_pgt_addr = MMU_PAGE_TABLES_ADDR; + if (hdev->pldm) + prop->mmu_pgt_size = 0x800000; /* 8MB */ + else + prop->mmu_pgt_size = MMU_PAGE_TABLES_SIZE; + prop->mmu_pte_size = HL_PTE_SIZE; + prop->mmu_hop_table_size = HOP_TABLE_SIZE; + prop->mmu_hop0_tables_total_size = HOP0_TABLES_TOTAL_SIZE; + prop->dram_page_size = PAGE_SIZE_2MB; + prop->host_phys_base_address = HOST_PHYS_BASE; prop->va_space_host_start_address = VA_HOST_SPACE_START; prop->va_space_host_end_address = VA_HOST_SPACE_END; @@ -752,7 +833,18 @@ static int goya_late_init(struct hl_device *hdev) goya_fetch_psoc_frequency(hdev); + rc = goya_mmu_clear_pgt_range(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to clear MMU page tables range\n"); + goto disable_pci_access; + } + return 0; + +disable_pci_access: + goya_send_pci_access_msg(hdev, ARMCP_PACKET_DISABLE_PCI_ACCESS); + + return rc; } /* @@ -2565,6 +2657,54 @@ out: return 0; } +static int goya_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + u64 hop0_addr; + int rc, i; + + if (!hdev->mmu_enable) + return 0; + + if (goya->hw_cap_initialized & HW_CAP_MMU) + return 0; + + hdev->dram_supports_virtual_memory = true; + + for (i = 0 ; i < prop->max_asid ; i++) { + hop0_addr = prop->mmu_pgt_addr + + (i * prop->mmu_hop_table_size); + + rc = goya_mmu_update_asid_hop0_addr(hdev, i, hop0_addr); + if (rc) { + dev_err(hdev->dev, + "failed to set hop0 addr for asid %d\n", i); + goto err; + } + } + + goya->hw_cap_initialized |= HW_CAP_MMU; + + /* init MMU cache manage page */ + WREG32(mmSTLB_CACHE_INV_BASE_39_8, MMU_CACHE_MNG_ADDR >> 8); + WREG32(mmSTLB_CACHE_INV_BASE_49_40, MMU_CACHE_MNG_ADDR << 40); + + /* Remove follower feature due to performance bug */ + WREG32_AND(mmSTLB_STLB_FEATURE_EN, + (~STLB_STLB_FEATURE_EN_FOLLOWER_EN_MASK)); + + hdev->asic_funcs->mmu_invalidate_cache(hdev, true); + + WREG32(mmMMU_MMU_ENABLE, 1); + WREG32(mmMMU_SPI_MASK, 0xF); + + return 0; + +err: + return rc; +} + /* * goya_hw_init - Goya hardware initialization code * @@ -2614,6 +2754,10 @@ static int goya_hw_init(struct hl_device *hdev) return rc; } + rc = goya_mmu_init(hdev); + if (rc) + return rc; + goya_init_security(hdev); goya_init_dma_qmans(hdev); @@ -4249,6 +4393,10 @@ int goya_context_switch(struct hl_device *hdev, u32 asid) rc = goya_send_job_on_qman0(hdev, job); + /* no point in setting the asid in case of failure */ + if (!rc) + 
goya_mmu_prepare(hdev, asid); + job->patched_cb->cs_cnt--; hl_cb_put(job->patched_cb); @@ -4284,6 +4432,22 @@ void goya_restore_phase_topology(struct hl_device *hdev) i = RREG32(mmSYNC_MNGR_SOB_OBJ_0); } +static u64 goya_read_pte(struct hl_device *hdev, u64 addr) +{ + struct goya_device *goya = hdev->asic_specific; + + return readq(hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + +static void goya_write_pte(struct hl_device *hdev, u64 addr, u64 val) +{ + struct goya_device *goya = hdev->asic_specific; + + writeq(val, hdev->pcie_bar[DDR_BAR_ID] + + (addr - goya->ddr_bar_cur_addr)); +} + static void goya_get_axi_name(struct hl_device *hdev, u32 agent_id, u16 event_type, char *axi_name, int len) { @@ -4567,6 +4731,233 @@ void *goya_get_events_stat(struct hl_device *hdev, u32 *size) return goya->events_stat; } +static int goya_mmu_clear_pgt_range(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct goya_device *goya = hdev->asic_specific; + struct packet_lin_dma *clear_pgt_range_pkt; + struct hl_cs_parser parser; + struct hl_cs_job *job; + u32 cb_size; + struct hl_cb *cb; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return 0; + + cb = hl_cb_kernel_create(hdev, PAGE_SIZE); + if (!cb) + return -EFAULT; + + clear_pgt_range_pkt = (struct packet_lin_dma *) + (uintptr_t) cb->kernel_address; + + memset(clear_pgt_range_pkt, 0, sizeof(*clear_pgt_range_pkt)); + cb_size = sizeof(*clear_pgt_range_pkt); + + clear_pgt_range_pkt->ctl = + ((PACKET_LIN_DMA << GOYA_PKT_CTL_OPCODE_SHIFT) | + (DMA_HOST_TO_DRAM << GOYA_PKT_LIN_DMA_CTL_DMA_DIR_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_MEMSET_SHIFT) | + (1 << GOYA_PKT_LIN_DMA_CTL_WO_SHIFT) | + (1 << GOYA_PKT_CTL_RB_SHIFT) | + (1 << GOYA_PKT_CTL_MB_SHIFT)); + + clear_pgt_range_pkt->src_addr = 0; + clear_pgt_range_pkt->dst_addr = prop->mmu_pgt_addr; + clear_pgt_range_pkt->tsize = prop->mmu_pgt_size + MMU_CACHE_MNG_SIZE; + + job = hl_cs_allocate_job(hdev, true); + if (!job) { + dev_err(hdev->dev, "Failed to allocate a new job\n"); + rc = -ENOMEM; + goto release_cb; + } + + job->id = 0; + job->user_cb = cb; + job->user_cb->cs_cnt++; + job->user_cb_size = cb_size; + job->hw_queue_id = GOYA_QUEUE_ID_DMA_0; + + parser.ctx_id = HL_KERNEL_ASID_ID; + parser.cs_sequence = 0; + parser.job_id = job->id; + parser.hw_queue_id = job->hw_queue_id; + parser.job_userptr_list = &job->userptr_list; + parser.user_cb = job->user_cb; + parser.user_cb_size = job->user_cb_size; + parser.ext_queue = job->ext_queue; + parser.use_virt_addr = hdev->mmu_enable; + + rc = hdev->asic_funcs->cs_parser(hdev, &parser); + if (rc) { + dev_err(hdev->dev, + "Failed to parse kernel CB when clearing pgt\n"); + goto free_job; + } + + job->patched_cb = parser.patched_cb; + job->job_cb_size = parser.patched_cb_size; + job->patched_cb->cs_cnt++; + + rc = goya_send_job_on_qman0(hdev, job); + + job->patched_cb->cs_cnt--; + hl_cb_put(job->patched_cb); + +free_job: + hl_userptr_delete_list(hdev, &job->userptr_list); + kfree(job); + cb->cs_cnt--; + +release_cb: + hl_cb_put(cb); + hl_cb_destroy(hdev, &hdev->kernel_cb_mgr, cb->id << PAGE_SHIFT); + + return rc; +} + +static void goya_mmu_prepare(struct hl_device *hdev, u32 asid) +{ + struct goya_device *goya = hdev->asic_specific; + int i; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + if (asid & ~MME_QM_GLBL_SECURE_PROPS_ASID_MASK) { + WARN(1, "asid %u is too big\n", asid); + return; + } + + /* zero the MMBP and ASID bits and then set the ASID */ + for (i = 0 ; i < 
GOYA_MMU_REGS_NUM ; i++) { + WREG32_AND(goya_mmu_regs[i], ~0x7FF); + WREG32_OR(goya_mmu_regs[i], asid); + } +} + +static void goya_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + /* no need for L1-only invalidation in Goya */ + if (!is_hard) + return; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* L0 & L1 invalidation */ + WREG32(mmSTLB_INV_ALL_START, 1); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_ALL_START, + status, + !status, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) + dev_notice_ratelimited(hdev->dev, + "Timeout when waiting for MMU cache invalidation\n"); +} + +static void goya_mmu_invalidate_cache_range(struct hl_device *hdev, + bool is_hard, u32 asid, u64 va, u64 size) +{ + struct goya_device *goya = hdev->asic_specific; + u32 status, timeout_usec, inv_data, pi; + int rc; + + if (!(goya->hw_cap_initialized & HW_CAP_MMU)) + return; + + /* no need for L1-only invalidation in Goya */ + if (!is_hard) + return; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + mutex_lock(&hdev->mmu_cache_lock); + + /* + * TODO: currently invalidate entire L0 & L1 as in regular hard + * invalidation. Need to apply invalidation of specific cache lines with + * a mask of ASID & VA & size. + * Note that L1 will be flushed entirely in any case. + */ + + /* L0 & L1 invalidation */ + inv_data = RREG32(mmSTLB_CACHE_INV); + /* PI is 8 bit */ + pi = ((inv_data & STLB_CACHE_INV_PRODUCER_INDEX_MASK) + 1) & 0xFF; + WREG32(mmSTLB_CACHE_INV, + (inv_data & STLB_CACHE_INV_INDEX_MASK_MASK) | pi); + + rc = hl_poll_timeout( + hdev, + mmSTLB_INV_CONSUMER_INDEX, + status, + status == pi, + 1000, + timeout_usec); + + mutex_unlock(&hdev->mmu_cache_lock); + + if (rc) + dev_notice_ratelimited(hdev->dev, + "Timeout when waiting for MMU cache invalidation\n"); +} + +static int goya_mmu_update_asid_hop0_addr(struct hl_device *hdev, u32 asid, + u64 phys_addr) +{ + u32 status, timeout_usec; + int rc; + + if (hdev->pldm) + timeout_usec = GOYA_PLDM_MMU_TIMEOUT_USEC; + else + timeout_usec = MMU_CONFIG_TIMEOUT_USEC; + + WREG32(MMU_HOP0_PA43_12, phys_addr >> MMU_HOP0_PA43_12_SHIFT); + WREG32(MMU_HOP0_PA49_44, phys_addr >> MMU_HOP0_PA49_44_SHIFT); + WREG32(MMU_ASID_BUSY, 0x80000000 | asid); + + rc = hl_poll_timeout( + hdev, + MMU_ASID_BUSY, + status, + !(status & 0x80000000), + 1000, + timeout_usec); + + if (rc) { + dev_err(hdev->dev, + "Timeout during MMU hop0 config of asid %d\n", asid); + return rc; + } + + return 0; +} + int goya_send_heartbeat(struct hl_device *hdev) { struct goya_device *goya = hdev->asic_specific; @@ -4830,6 +5221,10 @@ static const struct hl_asic_funcs goya_funcs = { .handle_eqe = goya_handle_eqe, .set_pll_profile = goya_set_pll_profile, .get_events_stat = goya_get_events_stat, + .read_pte = goya_read_pte, + .write_pte = goya_write_pte, + .mmu_invalidate_cache = goya_mmu_invalidate_cache, + .mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range, .send_heartbeat = goya_send_heartbeat, .enable_clock_gating = goya_init_clock_gating, .disable_clock_gating = goya_disable_clock_gating, diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 9adc7c6ec08b..03085e7a12dd 100644 ---
a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -19,6 +19,7 @@ #include #include #include +#include #define HL_NAME "habanalabs" @@ -39,6 +40,31 @@ /* MUST BE POWER OF 2 and larger than 1 */ #define HL_MAX_PENDING_CS 64 +/* Memory */ +#define MEM_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/* MMU */ +#define MMU_HASH_TABLE_BITS 7 /* 1 << 7 buckets */ + +/** + * struct pgt_info - MMU hop page info. + * @node: hash linked-list node for the pgts hash of pgts. + * @addr: physical address of the pgt. + * @ctx: pointer to the owner ctx. + * @num_of_ptes: indicates how many ptes are used in the pgt. + * + * The MMU page tables hierarchy is placed on the DRAM. When a new level (hop) + * is needed during mapping, a new page is allocated and this structure holds + * its essential information. During unmapping, if no valid PTEs remained in the + * page, it is freed with its pgt_info structure. + */ +struct pgt_info { + struct hlist_node node; + u64 addr; + struct hl_ctx *ctx; + int num_of_ptes; +}; + struct hl_device; struct hl_fpriv; @@ -72,11 +98,11 @@ struct hw_queue_properties { /** * enum vm_type_t - virtual memory mapping request information. * @VM_TYPE_USERPTR: mapping of user memory to device virtual address. - * @VM_TYPE_PHYS_LIST: mapping of DRAM memory to device virtual address. + * @VM_TYPE_PHYS_PACK: mapping of DRAM memory to device virtual address. */ enum vm_type_t { VM_TYPE_USERPTR, - VM_TYPE_PHYS_LIST + VM_TYPE_PHYS_PACK }; /** @@ -117,6 +143,12 @@ enum hl_device_hw_state { * mapping DRAM memory. * @va_space_dram_end_address: end address of virtual memory range for * mapping DRAM memory. + * @mmu_pgt_addr: base physical address in DRAM of MMU page tables. + * @mmu_pgt_size: MMU page tables total size. + * @mmu_pte_size: PTE size in MMU page tables. + * @mmu_hop_table_size: MMU hop table size. + * @mmu_hop0_tables_total_size: total size of MMU hop0 tables. + * @dram_page_size: page size for MMU DRAM allocation. * @cfg_size: configuration space size on SRAM. * @sram_size: total size of SRAM. * @max_asid: maximum number of open contexts (ASIDs). @@ -150,6 +182,12 @@ struct asic_fixed_properties { u64 va_space_host_end_address; u64 va_space_dram_start_address; u64 va_space_dram_end_address; + u64 mmu_pgt_addr; + u32 mmu_pgt_size; + u32 mmu_pte_size; + u32 mmu_hop_table_size; + u32 mmu_hop0_tables_total_size; + u32 dram_page_size; u32 cfg_size; u32 sram_size; u32 max_asid; @@ -419,6 +457,12 @@ enum hl_pll_frequency { * @handle_eqe: handle event queue entry (IRQ) from ArmCP. * @set_pll_profile: change PLL profile (manual/automatic). * @get_events_stat: retrieve event queue entries histogram. + * @read_pte: read MMU page table entry from DRAM. + * @write_pte: write MMU page table entry to DRAM. + * @mmu_invalidate_cache: flush MMU STLB cache, either with soft (L1 only) or + * hard (L0 & L1) flush. + * @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with + * ASID-VA-size mask. * @send_heartbeat: send is-alive packet to ArmCP and verify response. * @enable_clock_gating: enable clock gating for reducing power consumption. * @disable_clock_gating: disable clock for accessing registers on HBW. 
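A note on how the new PTE accessors fit together: the generic code in mmu.c (added by this patch but not quoted here) is expected to walk and edit the DRAM-resident hop tables only through these callbacks, followed by a cache flush. A minimal sketch, with hop_addr, pte_idx and new_pte as hypothetical values:

	u64 pte_addr = hop_addr + pte_idx * hdev->asic_prop.mmu_pte_size;
	u64 pte = hdev->asic_funcs->read_pte(hdev, pte_addr);	/* inspect */
	hdev->asic_funcs->write_pte(hdev, pte_addr, new_pte);	/* update */
	hdev->asic_funcs->mmu_invalidate_cache(hdev, true);	/* hard flush */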
@@ -483,6 +527,11 @@ struct hl_asic_funcs { void (*set_pll_profile)(struct hl_device *hdev, enum hl_pll_frequency freq); void* (*get_events_stat)(struct hl_device *hdev, u32 *size); + u64 (*read_pte)(struct hl_device *hdev, u64 addr); + void (*write_pte)(struct hl_device *hdev, u64 addr, u64 val); + void (*mmu_invalidate_cache)(struct hl_device *hdev, bool is_hard); + void (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard, + u32 asid, u64 va, u64 size); int (*send_heartbeat)(struct hl_device *hdev); void (*enable_clock_gating)(struct hl_device *hdev); void (*disable_clock_gating)(struct hl_device *hdev); @@ -504,17 +553,40 @@ struct hl_asic_funcs { #define HL_KERNEL_ASID_ID 0 +/** + * struct hl_va_range - virtual addresses range. + * @lock: protects the virtual addresses list. + * @list: list of virtual addresses blocks available for mappings. + * @start_addr: range start address. + * @end_addr: range end address. + */ +struct hl_va_range { + struct mutex lock; + struct list_head list; + u64 start_addr; + u64 end_addr; +}; + /** * struct hl_ctx - user/kernel context. + * @mem_hash: holds mapping from virtual address to virtual memory area + * descriptor (hl_vm_phys_pg_pack or hl_userptr). + * @mmu_hash: holds a mapping from virtual address to pgt_info structure. * @hpriv: pointer to the private (KMD) data of the process (fd). * @hdev: pointer to the device structure. * @refcount: reference counter for the context. Context is released only when * this hits 0. It is incremented on CS and CS_WAIT. * @cs_pending: array of DMA fence objects representing pending CS. + * @host_va_range: holds available virtual addresses for host mappings. + * @dram_va_range: holds available virtual addresses for DRAM mappings. + * @mem_hash_lock: protects the mem_hash. + * @mmu_lock: protects the MMU page tables. Any change to the PGT, modifying the + * MMU hash or walking the PGT requires taking this lock. * @cs_sequence: sequence number for CS. Value is assigned to a CS and passed * to user so user could inquire about CS. It is used as * index to cs_pending array. * @cs_lock: spinlock to protect cs_sequence. + * @dram_phys_mem: amount of used physical DRAM memory by this context. * @thread_restore_token: token to prevent multiple threads of the same context * from running the restore phase. Only one thread * should run it. @@ -524,12 +596,19 @@ struct hl_asic_funcs { * @asid: context's unique address space ID in the device's MMU. */ struct hl_ctx { + DECLARE_HASHTABLE(mem_hash, MEM_HASH_TABLE_BITS); + DECLARE_HASHTABLE(mmu_hash, MMU_HASH_TABLE_BITS); struct hl_fpriv *hpriv; struct hl_device *hdev; struct kref refcount; struct dma_fence *cs_pending[HL_MAX_PENDING_CS]; + struct hl_va_range host_va_range; + struct hl_va_range dram_va_range; + struct mutex mem_hash_lock; + struct mutex mmu_lock; u64 cs_sequence; spinlock_t cs_lock; + atomic64_t dram_phys_mem; atomic_t thread_restore_token; u32 thread_restore_wait_token; u32 asid; @@ -672,6 +751,85 @@ struct hl_cs_parser { }; +/* + * MEMORY STRUCTURE + */ + +/** + * struct hl_vm_hash_node - hash element from virtual address to virtual + * memory area descriptor (hl_vm_phys_pg_pack or + * hl_userptr). + * @node: node to hang on the hash table in context object. + * @vaddr: key virtual address. + * @ptr: value pointer (hl_vm_phys_pg_pack or hl_userptr). + */ +struct hl_vm_hash_node { + struct hlist_node node; + u64 vaddr; + void *ptr; +}; + +/** + * struct hl_vm_phys_pg_pack - physical page pack.
+ * @vm_type: describes the type of the virtual area descriptor. + * @pages: the physical page array. + * @mapping_cnt: number of shared mappings. + * @asid: the context related to this pack. + * @npages: number of physical pages in the pack. + * @page_size: size of each page in the pack. + * @total_size: total size of all the pages in this pack. + * @flags: HL_MEM_* flags related to this pack. + * @handle: the provided handle related to this pack. + * @offset: offset from the first page. + * @contiguous: is contiguous physical memory. + * @created_from_userptr: whether the pack was created from a host virtual address. + */ +struct hl_vm_phys_pg_pack { + enum vm_type_t vm_type; /* must be first */ + u64 *pages; + atomic_t mapping_cnt; + u32 asid; + u32 npages; + u32 page_size; + u32 total_size; + u32 flags; + u32 handle; + u32 offset; + u8 contiguous; + u8 created_from_userptr; +}; + +/** + * struct hl_vm_va_block - virtual range block information. + * @node: node to hang on the virtual range list in context object. + * @start: virtual range start address. + * @end: virtual range end address. + * @size: virtual range size. + */ +struct hl_vm_va_block { + struct list_head node; + u64 start; + u64 end; + u64 size; +}; + +/** + * struct hl_vm - virtual memory manager for MMU. + * @dram_pg_pool: pool for DRAM physical pages of 2MB. + * @dram_pg_pool_refcount: reference counter for the pool usage. + * @idr_lock: protects the phys_pg_pack_handles. + * @phys_pg_pack_handles: idr to hold all device allocation handles. + * @init_done: whether initialization was done. We need this because VM + * initialization might be skipped during device initialization. + */ +struct hl_vm { + struct gen_pool *dram_pg_pool; + struct kref dram_pg_pool_refcount; + spinlock_t idr_lock; + struct idr phys_pg_pack_handles; + u8 init_done; +}; + /* * FILE PRIVATE STRUCTURE */ @@ -787,12 +945,16 @@ struct hl_device_reset_work { * @asic_prop: ASIC specific immutable properties. * @asic_funcs: ASIC specific functions. * @asic_specific: ASIC specific information to use only from ASIC files. + * @mmu_pgt_pool: pool of available MMU hops. + * @vm: virtual memory manager for MMU. + * @mmu_cache_lock: protects MMU cache invalidation as it can serve only one context at a time. * @hwmon_dev: H/W monitor device. * @pm_mng_profile: current power management profile. * @hl_chip_info: ASIC's sensors information. * @cb_pool: list of preallocated CBs. * @cb_pool_lock: protects the CB pool. * @user_ctx: current user context executing. + * @dram_used_mem: current DRAM memory consumption. * @in_reset: is device in reset flow. * @curr_pll_profile: current PLL profile. * @fd_open_cnt: number of open user processes. @@ -812,6 +974,7 @@ struct hl_device_reset_work { * @heartbeat: is heartbeat sanity check towards ArmCP enabled. * @reset_on_lockup: true if a reset should be done in case of stuck CS, false * otherwise. + * @dram_supports_virtual_memory: is MMU enabled towards DRAM. * @init_done: is the initialization of the device done. * @mmu_enable: is MMU enabled.
*/ @@ -846,6 +1009,9 @@ struct hl_device { struct asic_fixed_properties asic_prop; const struct hl_asic_funcs *asic_funcs; void *asic_specific; + struct gen_pool *mmu_pgt_pool; + struct hl_vm vm; + struct mutex mmu_cache_lock; struct device *hwmon_dev; enum hl_pm_mng_profile pm_mng_profile; struct hwmon_chip_info *hl_chip_info; @@ -856,6 +1022,7 @@ struct hl_device { /* TODO: remove user_ctx for multiple process support */ struct hl_ctx *user_ctx; + atomic64_t dram_used_mem; atomic_t in_reset; atomic_t curr_pll_profile; atomic_t fd_open_cnt; @@ -872,6 +1039,7 @@ struct hl_device { u8 hard_reset_pending; u8 heartbeat; u8 reset_on_lockup; + u8 dram_supports_virtual_memory; u8 init_done; /* Parameters for bring-up */ @@ -1021,6 +1189,7 @@ int hl_device_reset(struct hl_device *hdev, bool hard_reset, void hl_hpriv_get(struct hl_fpriv *hpriv); void hl_hpriv_put(struct hl_fpriv *hpriv); int hl_device_set_frequency(struct hl_device *hdev, enum hl_pll_frequency freq); + int hl_build_hwmon_channel_info(struct hl_device *hdev, struct armcp_sensor *sensors_arr); @@ -1048,6 +1217,12 @@ struct hl_cs_job *hl_cs_allocate_job(struct hl_device *hdev, bool ext_queue); void goya_set_asic_funcs(struct hl_device *hdev); +int hl_vm_ctx_init(struct hl_ctx *ctx); +void hl_vm_ctx_fini(struct hl_ctx *ctx); + +int hl_vm_init(struct hl_device *hdev); +void hl_vm_fini(struct hl_device *hdev); + int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, struct hl_userptr *userptr); int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); @@ -1057,6 +1232,15 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, u32 size, struct list_head *userptr_list, struct hl_userptr **userptr); +int hl_mmu_init(struct hl_device *hdev); +void hl_mmu_fini(struct hl_device *hdev); +void hl_mmu_ctx_init(struct hl_ctx *ctx); +void hl_mmu_ctx_fini(struct hl_ctx *ctx); +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size); +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size); +void hl_mmu_swap_out(struct hl_ctx *ctx); +void hl_mmu_swap_in(struct hl_ctx *ctx); + long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr); void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq); long hl_get_temperature(struct hl_device *hdev, int sensor_index, u32 attr); @@ -1074,5 +1258,6 @@ long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); int hl_cb_ioctl(struct hl_fpriv *hpriv, void *data); int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data); int hl_cs_wait_ioctl(struct hl_fpriv *hpriv, void *data); +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data); #endif /* HABANALABSP_H_ */ diff --git a/drivers/misc/habanalabs/habanalabs_drv.c b/drivers/misc/habanalabs/habanalabs_drv.c index 77a1cc85e530..436ccae0989d 100644 --- a/drivers/misc/habanalabs/habanalabs_drv.c +++ b/drivers/misc/habanalabs/habanalabs_drv.c @@ -188,7 +188,7 @@ int create_hdev(struct hl_device **dev, struct pci_dev *pdev, hdev->reset_on_lockup = reset_on_lockup; /* Parameters for bring-up - set them to defaults */ - hdev->mmu_enable = 0; + hdev->mmu_enable = 1; hdev->cpu_enable = 1; hdev->reset_pcilink = 0; hdev->cpu_queues_enable = 1; diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index 481db1a5e97e..6e4dc5b5e696 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -18,7 +18,8 @@ static const struct hl_ioctl_desc hl_ioctls[] = { HL_IOCTL_DEF(HL_IOCTL_CB, 
hl_cb_ioctl), HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), - HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl) + HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl), + HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl) }; #define HL_CORE_IOCTL_COUNT ARRAY_SIZE(hl_ioctls) diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h new file mode 100644 index 000000000000..1bc36aba1426 --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_general.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef INCLUDE_MMU_GENERAL_H_ +#define INCLUDE_MMU_GENERAL_H_ + +#define PAGE_SHIFT_4KB 12 +#define PAGE_SHIFT_2MB 21 +#define PAGE_SIZE_2MB (_AC(1, UL) << PAGE_SHIFT_2MB) +#define PAGE_SIZE_4KB (_AC(1, UL) << PAGE_SHIFT_4KB) +#define PAGE_MASK_2MB (~(PAGE_SIZE_2MB - 1)) + +#define PAGE_PRESENT_MASK 0x0000000000001 +#define SWAP_OUT_MASK 0x0000000000004 +#define LAST_MASK 0x0000000000800 +#define PHYS_ADDR_MASK 0x3FFFFFFFFF000ull +#define HOP0_MASK 0x3000000000000ull +#define HOP1_MASK 0x0FF8000000000ull +#define HOP2_MASK 0x0007FC0000000ull +#define HOP3_MASK 0x000003FE00000ull +#define HOP4_MASK 0x00000001FF000ull +#define OFFSET_MASK 0x0000000000FFF + +#define HOP0_SHIFT 48 +#define HOP1_SHIFT 39 +#define HOP2_SHIFT 30 +#define HOP3_SHIFT 21 +#define HOP4_SHIFT 12 + +#define PTE_PHYS_ADDR_SHIFT 12 +#define PTE_PHYS_ADDR_MASK ~0xFFF + +#define HL_PTE_SIZE sizeof(u64) +#define HOP_TABLE_SIZE PAGE_SIZE_4KB +#define HOP0_TABLES_TOTAL_SIZE (HOP_TABLE_SIZE * MAX_ASID) + +#define MMU_HOP0_PA43_12_SHIFT 12 +#define MMU_HOP0_PA49_44_SHIFT (12 + 32) + +#define MMU_CONFIG_TIMEOUT_USEC 2000 /* 2 ms */ + +#endif /* INCLUDE_MMU_GENERAL_H_ */ diff --git a/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h new file mode 100644 index 000000000000..8539dd041f2c --- /dev/null +++ b/drivers/misc/habanalabs/include/hw_ip/mmu/mmu_v1_0.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Copyright 2016-2018 HabanaLabs, Ltd. + * All Rights Reserved. + * + */ + +#ifndef INCLUDE_MMU_V1_0_H_ +#define INCLUDE_MMU_V1_0_H_ + +#define MMU_HOP0_PA43_12 0x490004 +#define MMU_HOP0_PA49_44 0x490008 +#define MMU_ASID_BUSY 0x490000 + +#endif /* INCLUDE_MMU_V1_0_H_ */ diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index ad14376a1c25..6650c8085fc6 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -5,10 +5,1198 @@ * All Rights Reserved. */ +#include <uapi/misc/habanalabs.h> #include "habanalabs.h" +#include "include/hw_ip/mmu/mmu_general.h" #include #include +#include <linux/genalloc.h> + +#define PGS_IN_2MB_PAGE (PAGE_SIZE_2MB >> PAGE_SHIFT) +#define HL_MMU_DEBUG 0 + +/* + * The va ranges in the context object contain a list with the available chunks + * of device virtual memory. + * There is one range for host allocations and one for DRAM allocations. + * + * On initialization each range contains one chunk of all of its available + * virtual range, which is half of the total device virtual range. + * + * On each mapping of physical pages, a suitable virtual range chunk (with a + * minimum size) is selected from the list. If the chunk size equals the + * requested size, the chunk is returned. Otherwise, the chunk is split into + * two chunks - one to return as result and a remainder to stay in the list.
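+ * For example, carving a 2MB mapping out of a free chunk covering + * 0x20000000-0x3fffffff returns 0x20000000 and leaves the remainder + * 0x20200000-0x3fffffff as a single chunk in the list.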
+ * + * On each unmapping of a virtual address, the relevant virtual chunk is + * returned to the list. The chunk is added to the list and if its edges match + * the edges of the adjacent chunks (meaning a contiguous chunk can be created), + * the chunks are merged. + * + * On cleanup, the list is checked to have only one chunk covering all the relevant + * virtual range (which is half of the device total virtual range). + * If not (meaning not all mappings were unmapped), a warning is printed. + */ + +/* + * alloc_device_memory - allocate device memory + * + * @ctx : current context + * @args : host parameters containing the requested size + * @ret_handle : result handle + * + * This function does the following: + * - Allocate the requested size rounded up to 2MB pages + * - Return unique handle + */ +static int alloc_device_memory(struct hl_ctx *ctx, struct hl_mem_in *args, + u32 *ret_handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u64 paddr = 0; + u32 total_size, num_pgs, num_curr_pgs, page_size, page_shift; + int handle, rc, i; + bool contiguous; + + num_curr_pgs = 0; + page_size = hdev->asic_prop.dram_page_size; + page_shift = __ffs(page_size); + num_pgs = (args->alloc.mem_size + (page_size - 1)) >> page_shift; + total_size = num_pgs << page_shift; + + contiguous = args->flags & HL_MEM_CONTIGUOUS; + + if (contiguous) { + paddr = (u64) gen_pool_alloc(vm->dram_pg_pool, total_size); + if (!paddr) { + dev_err(hdev->dev, + "failed to allocate %u huge contiguous pages\n", + num_pgs); + return -ENOMEM; + } + } + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) { + rc = -ENOMEM; + goto pages_pack_err; + } + + phys_pg_pack->vm_type = VM_TYPE_PHYS_PACK; + phys_pg_pack->asid = ctx->asid; + phys_pg_pack->npages = num_pgs; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_size; + phys_pg_pack->flags = args->flags; + phys_pg_pack->contiguous = contiguous; + + phys_pg_pack->pages = kcalloc(num_pgs, sizeof(u64), GFP_KERNEL); + if (!phys_pg_pack->pages) { + rc = -ENOMEM; + goto pages_arr_err; + } + + if (phys_pg_pack->contiguous) { + for (i = 0 ; i < num_pgs ; i++) + phys_pg_pack->pages[i] = paddr + i * page_size; + } else { + for (i = 0 ; i < num_pgs ; i++) { + phys_pg_pack->pages[i] = (u64) gen_pool_alloc( + vm->dram_pg_pool, + page_size); + if (!phys_pg_pack->pages[i]) { + dev_err(hdev->dev, + "ioctl failed to allocate page\n"); + rc = -ENOMEM; + goto page_err; + } + + num_curr_pgs++; + } + } + + spin_lock(&vm->idr_lock); + handle = idr_alloc(&vm->phys_pg_pack_handles, phys_pg_pack, 1, 0, + GFP_KERNEL); + spin_unlock(&vm->idr_lock); + + if (handle < 0) { + dev_err(hdev->dev, "Failed to get handle for page\n"); + rc = -EFAULT; + goto idr_err; + } + + for (i = 0 ; i < num_pgs ; i++) + kref_get(&vm->dram_pg_pool_refcount); + + phys_pg_pack->handle = handle; + + atomic64_add(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_add(phys_pg_pack->total_size, &hdev->dram_used_mem); + + *ret_handle = handle; + + return 0; + +idr_err: +page_err: + if (!phys_pg_pack->contiguous) + for (i = 0 ; i < num_curr_pgs ; i++) + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[i], + page_size); + + kfree(phys_pg_pack->pages); +pages_arr_err: + kfree(phys_pg_pack); +pages_pack_err: + if (contiguous) + gen_pool_free(vm->dram_pg_pool, paddr, total_size); + + return rc; +} + +/* + * get_userptr_from_host_va - initialize userptr structure from given host + * virtual address + * + * @hdev :
habanalabs device structure + * @args : parameters containing the virtual address and size + * @p_userptr : pointer to result userptr structure + * + * This function does the following: + * - Allocate userptr structure + * - Pin the given host memory using the userptr structure + * - Perform DMA mapping to have the DMA addresses of the pages + */ +static int get_userptr_from_host_va(struct hl_device *hdev, + struct hl_mem_in *args, struct hl_userptr **p_userptr) +{ + struct hl_userptr *userptr; + int rc; + + userptr = kzalloc(sizeof(*userptr), GFP_KERNEL); + if (!userptr) { + rc = -ENOMEM; + goto userptr_err; + } + + rc = hl_pin_host_memory(hdev, args->map_host.host_virt_addr, + args->map_host.mem_size, userptr); + if (rc) { + dev_err(hdev->dev, "Failed to pin host memory\n"); + goto pin_err; + } + + rc = hdev->asic_funcs->asic_dma_map_sg(hdev, userptr->sgt->sgl, + userptr->sgt->nents, DMA_BIDIRECTIONAL); + if (rc) { + dev_err(hdev->dev, "failed to map sgt with DMA region\n"); + goto dma_map_err; + } + + userptr->dma_mapped = true; + userptr->dir = DMA_BIDIRECTIONAL; + userptr->vm_type = VM_TYPE_USERPTR; + + *p_userptr = userptr; + + return 0; + +dma_map_err: + hl_unpin_host_memory(hdev, userptr); +pin_err: + kfree(userptr); +userptr_err: + + return rc; +} + +/* + * free_userptr - free userptr structure + * + * @hdev : habanalabs device structure + * @userptr : userptr to free + * + * This function does the following: + * - Unpins the physical pages + * - Frees the userptr structure + */ +static void free_userptr(struct hl_device *hdev, struct hl_userptr *userptr) +{ + hl_unpin_host_memory(hdev, userptr); + kfree(userptr); +} + +/* + * dram_pg_pool_do_release - free DRAM pages pool + * + * @ref : pointer to reference object + * + * This function does the following: + * - Frees the idr structure of physical pages handles + * - Frees the generic pool of DRAM physical pages + */ +static void dram_pg_pool_do_release(struct kref *ref) +{ + struct hl_vm *vm = container_of(ref, struct hl_vm, + dram_pg_pool_refcount); + + /* + * free the idr here as only here we know for sure that there are no + * allocated physical pages and hence there are no handles in use + */ + idr_destroy(&vm->phys_pg_pack_handles); + gen_pool_destroy(vm->dram_pg_pool); +} + +/* + * free_phys_pg_pack - free physical page pack + * + * @hdev : habanalabs device structure + * @phys_pg_pack : physical page pack to free + * + * This function does the following: + * - For DRAM memory only, iterate over the pack and free each physical block + * structure by returning it to the general pool + * - Free the hl_vm_phys_pg_pack structure + */ +static void free_phys_pg_pack(struct hl_device *hdev, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_vm *vm = &hdev->vm; + int i; + + if (!phys_pg_pack->created_from_userptr) { + if (phys_pg_pack->contiguous) { + gen_pool_free(vm->dram_pg_pool, phys_pg_pack->pages[0], + phys_pg_pack->total_size); + + for (i = 0; i < phys_pg_pack->npages ; i++) + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } else { + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + gen_pool_free(vm->dram_pg_pool, + phys_pg_pack->pages[i], + phys_pg_pack->page_size); + kref_put(&vm->dram_pg_pool_refcount, + dram_pg_pool_do_release); + } + } + } + + kfree(phys_pg_pack->pages); + kfree(phys_pg_pack); +} + +/* + * free_device_memory - free device memory + * + * @ctx : current context + * @handle : handle of the memory chunk to free + * + * This function does the following: + * - Free the device memory 
related to the given handle + */ +static int free_device_memory(struct hl_ctx *ctx, u32 handle) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (phys_pg_pack) { + if (atomic_read(&phys_pg_pack->mapping_cnt) > 0) { + dev_err(hdev->dev, "handle %u is mapped, cannot free\n", + handle); + spin_unlock(&vm->idr_lock); + return -EINVAL; + } + + /* + * must remove from idr before the freeing of the physical + * pages as the refcount of the pool is also the trigger of the + * idr destroy + */ + idr_remove(&vm->phys_pg_pack_handles, handle); + spin_unlock(&vm->idr_lock); + + atomic64_sub(phys_pg_pack->total_size, &ctx->dram_phys_mem); + atomic64_sub(phys_pg_pack->total_size, &hdev->dram_used_mem); + + free_phys_pg_pack(hdev, phys_pg_pack); + } else { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "free device memory failed, no match for handle %u\n", + handle); + return -EINVAL; + } + + return 0; +} + +/* + * clear_va_list_locked - free virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to free + * + * This function does the following: + * - Iterate over the list and free each virtual addresses block + * + * This function should be called only when va_list lock is taken + */ +static void clear_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ + struct hl_vm_va_block *va_block, *tmp; + + list_for_each_entry_safe(va_block, tmp, va_list, node) { + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * print_va_list_locked - print virtual addresses list + * + * @hdev : habanalabs device structure + * @va_list : list of virtual addresses to print + * + * This function does the following: + * - Iterate over the list and print each virtual addresses block + * + * This function should be called only when va_list lock is taken + */ +static void print_va_list_locked(struct hl_device *hdev, + struct list_head *va_list) +{ +#if HL_MMU_DEBUG + struct hl_vm_va_block *va_block; + + dev_dbg(hdev->dev, "print va list:\n"); + + list_for_each_entry(va_block, va_list, node) + dev_dbg(hdev->dev, + "va block, start: 0x%llx, end: 0x%llx, size: %llu\n", + va_block->start, va_block->end, va_block->size); +#endif +} + +/* + * merge_va_blocks_locked - merge a virtual block if possible + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @va_block : virtual block to merge with adjacent blocks + * + * This function does the following: + * - Merge the given blocks with the adjacent blocks if their virtual ranges + * create a contiguous virtual range + * + * This Function should be called only when va_list lock is taken + */ +static void merge_va_blocks_locked(struct hl_device *hdev, + struct list_head *va_list, struct hl_vm_va_block *va_block) +{ + struct hl_vm_va_block *prev, *next; + + prev = list_prev_entry(va_block, node); + if (&prev->node != va_list && prev->end + 1 == va_block->start) { + prev->end = va_block->end; + prev->size = prev->end - prev->start; + list_del(&va_block->node); + kfree(va_block); + va_block = prev; + } + + next = list_next_entry(va_block, node); + if (&next->node != va_list && va_block->end + 1 == next->start) { + next->start = va_block->start; + next->size = next->end - next->start; + list_del(&va_block->node); + kfree(va_block); + } +} + +/* + * add_va_block_locked - add a 
virtual block to the virtual addresses list + * + * @hdev : pointer to the habanalabs device structure + * @va_list : pointer to the virtual addresses block list + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Add the given block to the virtual blocks list and merge with other + * blocks if a contiguous virtual block can be created + * + * This function should be called only when va_list lock is taken + */ +static int add_va_block_locked(struct hl_device *hdev, + struct list_head *va_list, u64 start, u64 end) +{ + struct hl_vm_va_block *va_block, *res = NULL; + u64 size = end - start; + + print_va_list_locked(hdev, va_list); + + list_for_each_entry(va_block, va_list, node) { + /* TODO: remove when the code matures */ + if (hl_mem_area_crosses_range(start, size, va_block->start, + va_block->end)) { + dev_err(hdev->dev, + "block crossing ranges at start 0x%llx, end 0x%llx\n", + va_block->start, va_block->end); + return -EINVAL; + } + + if (va_block->end < start) + res = va_block; + } + + va_block = kmalloc(sizeof(*va_block), GFP_KERNEL); + if (!va_block) + return -ENOMEM; + + va_block->start = start; + va_block->end = end; + va_block->size = size; + + if (!res) + list_add(&va_block->node, va_list); + else + list_add(&va_block->node, &res->node); + + merge_va_blocks_locked(hdev, va_list, va_block); + + print_va_list_locked(hdev, va_list); + + return 0; +} + +/* + * add_va_block - wrapper for add_va_block_locked + * + * @hdev : pointer to the habanalabs device structure + * @va_range : pointer to the virtual addresses range + * @start : start virtual address + * @end : end virtual address + * + * This function does the following: + * - Takes the list lock and calls add_va_block_locked + */ +static inline int add_va_block(struct hl_device *hdev, + struct hl_va_range *va_range, u64 start, u64 end) +{ + int rc; + + mutex_lock(&va_range->lock); + rc = add_va_block_locked(hdev, &va_range->list, start, end); + mutex_unlock(&va_range->lock); + + return rc; +} + +/* + * get_va_block - get a virtual block with the requested size + * + * @hdev : pointer to the habanalabs device structure + * @va_range : pointer to the virtual addresses range + * @size : requested block size + * @hint_addr : hint for the requested address, given by the user + * @is_userptr : is host or DRAM memory + * + * This function does the following: + * - Iterate on the virtual block list to find a suitable virtual block for the + * requested size + * - Reserve the requested block and update the list + * - Return the start address of the virtual block + */ +static u64 get_va_block(struct hl_device *hdev, + struct hl_va_range *va_range, u32 size, u64 hint_addr, + bool is_userptr) +{ + struct hl_vm_va_block *va_block, *new_va_block = NULL; + u64 valid_start, valid_size, prev_start, prev_end, page_mask, + res_valid_start = 0, res_valid_size = 0; + u32 page_size; + bool add_prev = false; + + if (is_userptr) { + /* + * We cannot know if the user allocated memory with huge pages + * or not, hence we continue with the biggest possible + * granularity.
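+ * 2MB is the largest page size the device MMU supports (PAGE_SIZE_2MB + * in mmu_general.h), so it is the conservative alignment choice here.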
+ */ + page_size = PAGE_SIZE_2MB; + page_mask = PAGE_MASK_2MB; + } else { + page_size = hdev->asic_prop.dram_page_size; + page_mask = ~((u64)page_size - 1); + } + + mutex_lock(&va_range->lock); + + print_va_list_locked(hdev, &va_range->list); + + list_for_each_entry(va_block, &va_range->list, node) { + /* calc the first possible aligned addr */ + valid_start = va_block->start; + + if (valid_start & (page_size - 1)) { + valid_start &= page_mask; + valid_start += page_size; + if (valid_start > va_block->end) + continue; + } + + valid_size = va_block->end - valid_start; + + if (valid_size >= size && + (!new_va_block || valid_size < res_valid_size)) { + + new_va_block = va_block; + res_valid_start = valid_start; + res_valid_size = valid_size; + } + + if (hint_addr && hint_addr >= valid_start && + ((hint_addr + size) <= va_block->end)) { + new_va_block = va_block; + res_valid_start = hint_addr; + res_valid_size = valid_size; + break; + } + } + + if (!new_va_block) { + dev_err(hdev->dev, "no available va block for size %u\n", size); + goto out; + } + + if (res_valid_start > new_va_block->start) { + prev_start = new_va_block->start; + prev_end = res_valid_start - 1; + + new_va_block->start = res_valid_start; + new_va_block->size = res_valid_size; + + add_prev = true; + } + + if (new_va_block->size > size) { + new_va_block->start += size; + new_va_block->size = new_va_block->end - new_va_block->start; + } else { + list_del(&new_va_block->node); + kfree(new_va_block); + } + + if (add_prev) + add_va_block_locked(hdev, &va_range->list, prev_start, + prev_end); + + print_va_list_locked(hdev, &va_range->list); +out: + mutex_unlock(&va_range->lock); + + return res_valid_start; +} + +/* + * get_sg_info - get number of pages and the DMA address from SG list + * + * @sg : the SG list + * @dma_addr : pointer to DMA address to return + * + * Calculate the number of consecutive pages described by the SG list. Take the + * offset of the address in the first page, add to it the length and round it up + * to the number of needed pages. + */ +static u32 get_sg_info(struct scatterlist *sg, dma_addr_t *dma_addr) +{ + *dma_addr = sg_dma_address(sg); + + return ((((*dma_addr) & (PAGE_SIZE - 1)) + sg_dma_len(sg)) + + (PAGE_SIZE - 1)) >> PAGE_SHIFT; +} + +/* + * init_phys_pg_pack_from_userptr - initialize physical page pack from host + * memory + * + * @ctx : current context + * @userptr : userptr to initialize from + * @pphys_pg_pack : result pointer + * + * This function does the following: + * - Pin the physical pages related to the given virtual block + * - Create a physical page pack from the physical pages related to the given + * virtual block + */ +static int init_phys_pg_pack_from_userptr(struct hl_ctx *ctx, + struct hl_userptr *userptr, + struct hl_vm_phys_pg_pack **pphys_pg_pack) +{ + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct scatterlist *sg; + dma_addr_t dma_addr; + u64 page_mask; + u32 npages, total_npages, page_size = PAGE_SIZE; + bool first = true, is_huge_page_opt = true; + int rc, i, j; + + phys_pg_pack = kzalloc(sizeof(*phys_pg_pack), GFP_KERNEL); + if (!phys_pg_pack) + return -ENOMEM; + + phys_pg_pack->vm_type = userptr->vm_type; + phys_pg_pack->created_from_userptr = true; + phys_pg_pack->asid = ctx->asid; + atomic_set(&phys_pg_pack->mapping_cnt, 1); + + /* Only if all dma_addrs are aligned to 2MB and their + * sizes are at least 2MB, we can use huge page mapping.
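+ * For example, an sg table whose entries are all 2MB long and 2MB + * aligned collapses into one PTE per 2MB; a single entry that breaks + * either rule forces 4KB pages for the whole pack.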
+ * We limit the 2MB optimization to this condition, + * since later on we acquire the related VA range as one + * consecutive block. + */ + total_npages = 0; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + total_npages += npages; + + if (first) { + first = false; + dma_addr &= PAGE_MASK_2MB; + } + + if ((npages % PGS_IN_2MB_PAGE) || + (dma_addr & (PAGE_SIZE_2MB - 1))) + is_huge_page_opt = false; + } + + if (is_huge_page_opt) { + page_size = PAGE_SIZE_2MB; + total_npages /= PGS_IN_2MB_PAGE; + } + + page_mask = ~(((u64) page_size) - 1); + + phys_pg_pack->pages = kcalloc(total_npages, sizeof(u64), GFP_KERNEL); + if (!phys_pg_pack->pages) { + rc = -ENOMEM; + goto page_pack_arr_mem_err; + } + + phys_pg_pack->npages = total_npages; + phys_pg_pack->page_size = page_size; + phys_pg_pack->total_size = total_npages * page_size; + + j = 0; + first = true; + for_each_sg(userptr->sgt->sgl, sg, userptr->sgt->nents, i) { + npages = get_sg_info(sg, &dma_addr); + + /* align down to physical page size and save the offset */ + if (first) { + first = false; + phys_pg_pack->offset = dma_addr & (page_size - 1); + dma_addr &= page_mask; + } + + while (npages) { + phys_pg_pack->pages[j++] = dma_addr; + dma_addr += page_size; + + if (is_huge_page_opt) + npages -= PGS_IN_2MB_PAGE; + else + npages--; + } + } + + *pphys_pg_pack = phys_pg_pack; + + return 0; + +page_pack_arr_mem_err: + kfree(phys_pg_pack); + + return rc; +} + +/* + * map_phys_page_pack - maps the physical page pack + * + * @ctx : current context + * @vaddr : start address of the virtual area to map from + * @phys_pg_pack : the pack of physical pages to map to + * + * This function does the following: + * - Maps each chunk of virtual memory to matching physical chunk + * - On failure, unmaps everything that was already mapped before returning + * - Returns 0 on success, error code otherwise.
+ */ +static int map_phys_page_pack(struct hl_ctx *ctx, u64 vaddr, + struct hl_vm_phys_pg_pack *phys_pg_pack) +{ + struct hl_device *hdev = ctx->hdev; + u64 next_vaddr = vaddr, paddr; + u32 page_size = phys_pg_pack->page_size; + int i, rc = 0, mapped_pg_cnt = 0; + + for (i = 0 ; i < phys_pg_pack->npages ; i++) { + paddr = phys_pg_pack->pages[i]; + + /* For accessing the host we need to turn on bit 39 */ + if (phys_pg_pack->created_from_userptr) + paddr += hdev->asic_prop.host_phys_base_address; + + rc = hl_mmu_map(ctx, next_vaddr, paddr, page_size); + if (rc) { + dev_err(hdev->dev, + "map failed for handle %u, npages: %d, mapped: %d", + phys_pg_pack->handle, phys_pg_pack->npages, + mapped_pg_cnt); + goto err; + } + + mapped_pg_cnt++; + next_vaddr += page_size; + } + + return 0; + +err: + next_vaddr = vaddr; + for (i = 0 ; i < mapped_pg_cnt ; i++) { + if (hl_mmu_unmap(ctx, next_vaddr, page_size)) + dev_warn_ratelimited(hdev->dev, + "failed to unmap handle %u, va: 0x%llx, pa: 0x%llx, page size: %u\n", + phys_pg_pack->handle, next_vaddr, + phys_pg_pack->pages[i], page_size); + + next_vaddr += page_size; + } + + return rc; +} + +static int get_paddr_from_handle(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *paddr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + u32 handle; + + handle = lower_32_bits(args->map_device.handle); + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, "no match for handle %u\n", handle); + return -EINVAL; + } + + *paddr = phys_pg_pack->pages[0]; + + spin_unlock(&vm->idr_lock); + + return 0; +} + +/* + * map_device_va - map the given memory + * + * @ctx : current context + * @args : host parameters with handle/host virtual address + * @device_addr : pointer to result device virtual address + * + * This function does the following: + * - If given a physical device memory handle, map to a device virtual block + * and return the start address of this block + * - If given a host virtual address and size, find the related physical pages, + * map a device virtual block to these pages and return the start address of + * this block + */ +static int map_device_va(struct hl_ctx *ctx, struct hl_mem_in *args, + u64 *device_addr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_pack; + struct hl_userptr *userptr = NULL; + struct hl_vm_hash_node *hnode; + enum vm_type_t *vm_type; + u64 ret_vaddr, hint_addr; + u32 handle = 0; + int rc; + bool is_userptr = args->flags & HL_MEM_USERPTR; + + /* Assume failure */ + *device_addr = 0; + + if (is_userptr) { + rc = get_userptr_from_host_va(hdev, args, &userptr); + if (rc) { + dev_err(hdev->dev, "failed to get userptr from va\n"); + return rc; + } + + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + args->map_host.host_virt_addr); + goto init_page_pack_err; + } + + vm_type = (enum vm_type_t *) userptr; + hint_addr = args->map_host.hint_addr; + } else { + handle = lower_32_bits(args->map_device.handle); + + spin_lock(&vm->idr_lock); + phys_pg_pack = idr_find(&vm->phys_pg_pack_handles, handle); + if (!phys_pg_pack) { + spin_unlock(&vm->idr_lock); + dev_err(hdev->dev, + "no match for handle %u\n", handle); + return -EINVAL; + } + + /* increment now to avoid freeing device memory while mapping */ +
atomic_inc(&phys_pg_pack->mapping_cnt); + + spin_unlock(&vm->idr_lock); + + vm_type = (enum vm_type_t *) phys_pg_pack; + + hint_addr = args->map_device.hint_addr; + } + + /* + * relevant for mapping device physical memory only, as host memory is + * implicitly shared + */ + if (!is_userptr && !(phys_pg_pack->flags & HL_MEM_SHARED) && + phys_pg_pack->asid != ctx->asid) { + dev_err(hdev->dev, + "Failed to map memory, handle %u is not shared\n", + handle); + rc = -EPERM; + goto shared_err; + } + + hnode = kzalloc(sizeof(*hnode), GFP_KERNEL); + if (!hnode) { + rc = -ENOMEM; + goto hnode_err; + } + + ret_vaddr = get_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + phys_pg_pack->total_size, hint_addr, is_userptr); + if (!ret_vaddr) { + dev_err(hdev->dev, "no available va block for handle %u\n", + handle); + rc = -ENOMEM; + goto va_block_err; + } + + mutex_lock(&ctx->mmu_lock); + + rc = map_phys_page_pack(ctx, ret_vaddr, phys_pg_pack); + if (rc) { + mutex_unlock(&ctx->mmu_lock); + dev_err(hdev->dev, "mapping page pack failed for handle %u\n", + handle); + goto map_err; + } + + hdev->asic_funcs->mmu_invalidate_cache_range(hdev, false, ctx->asid, + ret_vaddr, phys_pg_pack->total_size); + + mutex_unlock(&ctx->mmu_lock); + + ret_vaddr += phys_pg_pack->offset; + + hnode->ptr = vm_type; + hnode->vaddr = ret_vaddr; + + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, ret_vaddr); + mutex_unlock(&ctx->mem_hash_lock); + + *device_addr = ret_vaddr; + + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); + + return 0; + +map_err: + if (add_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + ret_vaddr, + ret_vaddr + phys_pg_pack->total_size - 1)) + dev_warn(hdev->dev, + "release va block failed for handle 0x%x, vaddr: 0x%llx\n", + handle, ret_vaddr); + +va_block_err: + kfree(hnode); +hnode_err: +shared_err: + atomic_dec(&phys_pg_pack->mapping_cnt); + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +init_page_pack_err: + if (is_userptr) + free_userptr(hdev, userptr); + + return rc; +} + +/* + * unmap_device_va - unmap the given device virtual address + * + * @ctx : current context + * @vaddr : device virtual address to unmap + * + * This function does the following: + * - Unmap the physical pages related to the given virtual address + * - return the device virtual block to the virtual block list + */ +static int unmap_device_va(struct hl_ctx *ctx, u64 vaddr) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm_phys_pg_pack *phys_pg_pack = NULL; + struct hl_vm_hash_node *hnode = NULL; + struct hl_userptr *userptr = NULL; + enum vm_type_t *vm_type; + u64 next_vaddr; + u32 page_size; + bool is_userptr; + int i, rc; + + /* protect from double entrance */ + mutex_lock(&ctx->mem_hash_lock); + hash_for_each_possible(ctx->mem_hash, hnode, node, (unsigned long)vaddr) + if (vaddr == hnode->vaddr) + break; + + if (!hnode) { + mutex_unlock(&ctx->mem_hash_lock); + dev_err(hdev->dev, + "unmap failed, no mem hnode for vaddr 0x%llx\n", + vaddr); + return -EINVAL; + } + + hash_del(&hnode->node); + mutex_unlock(&ctx->mem_hash_lock); + + vm_type = hnode->ptr; + + if (*vm_type == VM_TYPE_USERPTR) { + is_userptr = true; + userptr = hnode->ptr; + rc = init_phys_pg_pack_from_userptr(ctx, userptr, + &phys_pg_pack); + if (rc) { + dev_err(hdev->dev, + "unable to init page pack for vaddr 0x%llx\n", + vaddr); + goto vm_type_err; + } + } else if (*vm_type == VM_TYPE_PHYS_PACK) { + is_userptr = false; + phys_pg_pack = hnode->ptr; + } else { + 
dev_warn(hdev->dev, + "unmap failed, unknown vm desc for vaddr 0x%llx\n", + vaddr); + rc = -EFAULT; + goto vm_type_err; + } + + if (atomic_read(&phys_pg_pack->mapping_cnt) == 0) { + dev_err(hdev->dev, "vaddr 0x%llx is not mapped\n", vaddr); + rc = -EINVAL; + goto mapping_cnt_err; + } + + page_size = phys_pg_pack->page_size; + vaddr &= ~(((u64) page_size) - 1); + + next_vaddr = vaddr; + + mutex_lock(&ctx->mmu_lock); + + for (i = 0 ; i < phys_pg_pack->npages ; i++, next_vaddr += page_size) + if (hl_mmu_unmap(ctx, next_vaddr, page_size)) + dev_warn_ratelimited(hdev->dev, + "unmap failed for vaddr: 0x%llx\n", next_vaddr); + + hdev->asic_funcs->mmu_invalidate_cache_range(hdev, true, ctx->asid, + vaddr, phys_pg_pack->total_size); + + mutex_unlock(&ctx->mmu_lock); + + if (add_va_block(hdev, + is_userptr ? &ctx->host_va_range : &ctx->dram_va_range, + vaddr, + vaddr + phys_pg_pack->total_size - 1)) + dev_warn(hdev->dev, "add va block failed for vaddr: 0x%llx\n", + vaddr); + + atomic_dec(&phys_pg_pack->mapping_cnt); + kfree(hnode); + + if (is_userptr) { + free_phys_pg_pack(hdev, phys_pg_pack); + free_userptr(hdev, userptr); + } + + return 0; + +mapping_cnt_err: + if (is_userptr) + free_phys_pg_pack(hdev, phys_pg_pack); +vm_type_err: + mutex_lock(&ctx->mem_hash_lock); + hash_add(ctx->mem_hash, &hnode->node, vaddr); + mutex_unlock(&ctx->mem_hash_lock); + + return rc; +} + +int hl_mem_ioctl(struct hl_fpriv *hpriv, void *data) +{ + union hl_mem_args *args = data; + struct hl_device *hdev = hpriv->hdev; + struct hl_ctx *ctx = hpriv->ctx; + u64 device_addr = 0; + u32 handle = 0; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_warn_ratelimited(hdev->dev, + "Device is disabled or in reset. Can't execute memory IOCTL\n"); + return -EBUSY; + } + + if (hdev->mmu_enable) { + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (!hdev->dram_supports_virtual_memory) { + dev_err(hdev->dev, + "DRAM alloc is not supported\n"); + rc = -EINVAL; + goto out; + } + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + if (!hdev->dram_supports_virtual_memory) { + dev_err(hdev->dev, + "DRAM free is not supported\n"); + rc = -EINVAL; + goto out; + } + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + rc = map_device_va(ctx, &args->in, &device_addr); + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = unmap_device_va(ctx, + args->in.unmap.device_virt_addr); + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + } else { + switch (args->in.op) { + case HL_MEM_OP_ALLOC: + if (args->in.alloc.mem_size == 0) { + dev_err(hdev->dev, + "alloc size must be larger than 0\n"); + rc = -EINVAL; + goto out; + } + + /* Force contiguous as there are no real MMU + * translations to overcome physical memory gaps + */ + args->in.flags |= HL_MEM_CONTIGUOUS; + rc = alloc_device_memory(ctx, &args->in, &handle); + + memset(args, 0, sizeof(*args)); + args->out.handle = (__u64) handle; + break; + + case HL_MEM_OP_FREE: + rc = free_device_memory(ctx, args->in.free.handle); + break; + + case HL_MEM_OP_MAP: + if (args->in.flags & HL_MEM_USERPTR) { + device_addr = args->in.map_host.host_virt_addr; + rc = 0; + } else { + rc = 
get_paddr_from_handle(ctx, &args->in, + &device_addr); + } + + memset(args, 0, sizeof(*args)); + args->out.device_virt_addr = device_addr; + break; + + case HL_MEM_OP_UNMAP: + rc = 0; + break; + + default: + dev_err(hdev->dev, "Unknown opcode for memory IOCTL\n"); + rc = -ENOTTY; + break; + } + } + +out: + return rc; +} /* * hl_pin_host_memory - pins a chunk of host memory @@ -196,3 +1384,332 @@ bool hl_userptr_is_pinned(struct hl_device *hdev, u64 addr, return false; } + +/* + * hl_va_range_init - initialize virtual addresses range + * + * @hdev : pointer to the habanalabs device structure + * @va_range : pointer to the range to initialize + * @start : range start address + * @end : range end address + * + * This function does the following: + * - Initializes the virtual addresses list of the given range with the given + * addresses. + */ +static int hl_va_range_init(struct hl_device *hdev, + struct hl_va_range *va_range, u64 start, u64 end) +{ + int rc; + + INIT_LIST_HEAD(&va_range->list); + + /* PAGE_SIZE alignment */ + + if (start & (PAGE_SIZE - 1)) { + start &= PAGE_MASK; + start += PAGE_SIZE; + } + + if (end & (PAGE_SIZE - 1)) + end &= PAGE_MASK; + + if (start >= end) { + dev_err(hdev->dev, "too small vm range for va list\n"); + return -EFAULT; + } + + rc = add_va_block(hdev, va_range, start, end); + + if (rc) { + dev_err(hdev->dev, "Failed to init host va list\n"); + return rc; + } + + va_range->start_addr = start; + va_range->end_addr = end; + + return 0; +} + +/* + * hl_vm_ctx_init_with_ranges - initialize virtual memory for context + * + * @ctx : pointer to the habanalabs context structure + * @host_range_start : host virtual addresses range start + * @host_range_end : host virtual addresses range end + * @dram_range_start : dram virtual addresses range start + * @dram_range_end : dram virtual addresses range end + * + * This function initializes the following: + * - MMU for context + * - Virtual address to area descriptor hashtable + * - Virtual block list of available virtual memory + */ +int hl_vm_ctx_init_with_ranges(struct hl_ctx *ctx, u64 host_range_start, + u64 host_range_end, u64 dram_range_start, + u64 dram_range_end) +{ + struct hl_device *hdev = ctx->hdev; + int rc; + + hl_mmu_ctx_init(ctx); + + mutex_init(&ctx->mem_hash_lock); + hash_init(ctx->mem_hash); + + mutex_init(&ctx->host_va_range.lock); + + rc = hl_va_range_init(hdev, &ctx->host_va_range, host_range_start, + host_range_end); + if (rc) { + dev_err(hdev->dev, "failed to init host vm range\n"); + goto host_vm_err; + } + + mutex_init(&ctx->dram_va_range.lock); + + rc = hl_va_range_init(hdev, &ctx->dram_va_range, dram_range_start, + dram_range_end); + if (rc) { + dev_err(hdev->dev, "failed to init dram vm range\n"); + goto dram_vm_err; + } + + return 0; + +dram_vm_err: + mutex_destroy(&ctx->dram_va_range.lock); + + mutex_lock(&ctx->host_va_range.lock); + clear_va_list_locked(hdev, &ctx->host_va_range.list); + mutex_unlock(&ctx->host_va_range.lock); +host_vm_err: + mutex_destroy(&ctx->host_va_range.lock); + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); + + return rc; +} + +int hl_vm_ctx_init(struct hl_ctx *ctx) +{ + struct asic_fixed_properties *prop = &ctx->hdev->asic_prop; + u64 host_range_start, host_range_end, dram_range_start, + dram_range_end; + + atomic64_set(&ctx->dram_phys_mem, 0); + + /* + * - If MMU is enabled, init the ranges as usual. + * - If MMU is disabled, in case of host mapping, the returned address + * is the given one. 
+ * In case of DRAM mapping, the returned address is the physical + * address of the memory related to the given handle. + */ + if (ctx->hdev->mmu_enable) { + dram_range_start = prop->va_space_dram_start_address; + dram_range_end = prop->va_space_dram_end_address; + host_range_start = prop->va_space_host_start_address; + host_range_end = prop->va_space_host_end_address; + } else { + dram_range_start = prop->dram_user_base_address; + dram_range_end = prop->dram_end_address; + host_range_start = prop->dram_user_base_address; + host_range_end = prop->dram_end_address; + } + + return hl_vm_ctx_init_with_ranges(ctx, host_range_start, host_range_end, + dram_range_start, dram_range_end); +} + +/* + * hl_va_range_fini - clear a virtual addresses range + * + * @hdev : pointer to the habanalabs structure + * @va_range : pointer to virtual addresses range + * + * This function does the following: + * - Checks that the given range contains the whole initial range + * - Frees the virtual addresses block list and its lock + */ +static void hl_va_range_fini(struct hl_device *hdev, + struct hl_va_range *va_range) +{ + struct hl_vm_va_block *va_block; + + if (list_empty(&va_range->list)) { + dev_warn(hdev->dev, + "va list should not be empty on cleanup!\n"); + goto out; + } + + if (!list_is_singular(&va_range->list)) { + dev_warn(hdev->dev, + "va list should not contain multiple blocks on cleanup!\n"); + goto free_va_list; + } + + va_block = list_first_entry(&va_range->list, typeof(*va_block), node); + + if (va_block->start != va_range->start_addr || + va_block->end != va_range->end_addr) { + dev_warn(hdev->dev, + "wrong va block on cleanup, from 0x%llx to 0x%llx\n", + va_block->start, va_block->end); + goto free_va_list; + } + +free_va_list: + mutex_lock(&va_range->lock); + clear_va_list_locked(hdev, &va_range->list); + mutex_unlock(&va_range->lock); + +out: + mutex_destroy(&va_range->lock); +} + +/* + * hl_vm_ctx_fini - virtual memory teardown of context + * + * @ctx : pointer to the habanalabs context structure + * + * This function performs teardown of the following: + * - Virtual block list of available virtual memory + * - Virtual address to area descriptor hashtable + * - MMU for context + * + * In addition this function does the following: + * - Unmaps the existing hashtable nodes if the hashtable is not empty. The + * hashtable should be empty as no valid mappings should exist at this + * point. + * - Frees any existing physical page list from the idr which relates to the + * current context asid. + * - This function checks the virtual block list for correctness. At this point + * the list should contain one element which describes the whole virtual + * memory range of the context. Otherwise, a warning is printed.
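+ * + * The life cycle implied by this patch thus pairs up as: hl_vm_init() at + * device init, hl_vm_ctx_init() per context, then hl_vm_ctx_fini() and + * finally hl_vm_fini() on device teardown.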
+ */ +void hl_vm_ctx_fini(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct hl_vm *vm = &hdev->vm; + struct hl_vm_phys_pg_pack *phys_pg_list; + struct hl_vm_hash_node *hnode; + struct hlist_node *tmp_node; + int i; + + if (!hash_empty(ctx->mem_hash)) + dev_notice(hdev->dev, "ctx is freed while it has va in use\n"); + + hash_for_each_safe(ctx->mem_hash, i, tmp_node, hnode, node) { + dev_dbg(hdev->dev, + "hl_mem_hash_node of vaddr 0x%llx of asid %d is still alive\n", + hnode->vaddr, ctx->asid); + unmap_device_va(ctx, hnode->vaddr); + } + + spin_lock(&vm->idr_lock); + idr_for_each_entry(&vm->phys_pg_pack_handles, phys_pg_list, i) + if (phys_pg_list->asid == ctx->asid) { + dev_dbg(hdev->dev, + "page list 0x%p of asid %d is still alive\n", + phys_pg_list, ctx->asid); + free_phys_pg_pack(hdev, phys_pg_list); + idr_remove(&vm->phys_pg_pack_handles, i); + } + spin_unlock(&vm->idr_lock); + + hl_va_range_fini(hdev, &ctx->dram_va_range); + hl_va_range_fini(hdev, &ctx->host_va_range); + + mutex_destroy(&ctx->mem_hash_lock); + hl_mmu_ctx_fini(ctx); +} + +/* + * hl_vm_init - initialize virtual memory module + * + * @hdev : pointer to the habanalabs device structure + * + * This function initializes the following: + * - MMU module + * - DRAM physical pages pool of 2MB + * - Idr for device memory allocation handles + */ +int hl_vm_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + struct hl_vm *vm = &hdev->vm; + int rc; + + rc = hl_mmu_init(hdev); + if (rc) { + dev_err(hdev->dev, "Failed to init MMU\n"); + return rc; + } + + vm->dram_pg_pool = gen_pool_create(__ffs(prop->dram_page_size), -1); + if (!vm->dram_pg_pool) { + dev_err(hdev->dev, "Failed to create dram page pool\n"); + rc = -ENOMEM; + goto pool_create_err; + } + + kref_init(&vm->dram_pg_pool_refcount); + + rc = gen_pool_add(vm->dram_pg_pool, prop->dram_user_base_address, + prop->dram_end_address - prop->dram_user_base_address, + -1); + + if (rc) { + dev_err(hdev->dev, + "Failed to add memory to dram page pool %d\n", rc); + goto pool_add_err; + } + + spin_lock_init(&vm->idr_lock); + idr_init(&vm->phys_pg_pack_handles); + + atomic64_set(&hdev->dram_used_mem, 0); + + vm->init_done = true; + + return 0; + +pool_add_err: + gen_pool_destroy(vm->dram_pg_pool); +pool_create_err: + hl_mmu_fini(hdev); + + return rc; +} + +/* + * hl_vm_fini - virtual memory module teardown + * + * @hdev : pointer to the habanalabs device structure + * + * This function perform teardown to the following: + * - Idr for device memory allocation handles + * - DRAM physical pages pool of 2MB + * - MMU module + */ +void hl_vm_fini(struct hl_device *hdev) +{ + struct hl_vm *vm = &hdev->vm; + + if (!vm->init_done) + return; + + /* + * At this point all the contexts should be freed and hence no DRAM + * memory should be in use. Hence the DRAM pool should be freed here. + */ + if (kref_put(&vm->dram_pg_pool_refcount, dram_pg_pool_do_release) != 1) + dev_warn(hdev->dev, "dram_pg_pool was not destroyed on %s\n", + __func__); + + hl_mmu_fini(hdev); + + vm->init_done = false; +} diff --git a/drivers/misc/habanalabs/mmu.c b/drivers/misc/habanalabs/mmu.c new file mode 100644 index 000000000000..79c70d92e74b --- /dev/null +++ b/drivers/misc/habanalabs/mmu.c @@ -0,0 +1,691 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Copyright 2016-2019 HabanaLabs, Ltd. + * All Rights Reserved. 
+ */ + +#include "habanalabs.h" +#include "include/hw_ip/mmu/mmu_general.h" + +#include <linux/genalloc.h> +#include <linux/slab.h> + +static struct pgt_info *get_pgt_info(struct hl_ctx *ctx, u64 addr) +{ + struct pgt_info *pgt_info = NULL; + + hash_for_each_possible(ctx->mmu_hash, pgt_info, node, + (unsigned long) addr) + if (addr == pgt_info->addr) + break; + + return pgt_info; +} + +static void free_hop(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + + gen_pool_free(pgt_info->ctx->hdev->mmu_pgt_pool, pgt_info->addr, + ctx->hdev->asic_prop.mmu_hop_table_size); + hash_del(&pgt_info->node); + + kfree(pgt_info); +} + +static u64 alloc_hop(struct hl_ctx *ctx) +{ + struct hl_device *hdev = ctx->hdev; + struct pgt_info *pgt_info; + u64 addr; + + pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL); + if (!pgt_info) + return ULLONG_MAX; + + addr = (u64) gen_pool_alloc(hdev->mmu_pgt_pool, + hdev->asic_prop.mmu_hop_table_size); + if (!addr) { + dev_err(hdev->dev, "failed to allocate page\n"); + kfree(pgt_info); + return ULLONG_MAX; + } + + pgt_info->addr = addr; + pgt_info->ctx = ctx; + pgt_info->num_of_ptes = 0; + hash_add(ctx->mmu_hash, &pgt_info->node, addr); + + return addr; +} + +static inline void clear_pte(struct hl_device *hdev, u64 pte_addr) +{ + /* clear the last and present bits */ + hdev->asic_funcs->write_pte(hdev, pte_addr, 0); +} + +static inline void get_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + get_pgt_info(ctx, hop_addr)->num_of_ptes++; +} + +/* + * put_pte - decrement the num of ptes and free the hop if possible + * + * @ctx: pointer to the context structure + * @hop_addr: addr of the hop + * + * This function returns the number of ptes left on this hop. If the number is + * 0, it means the hop was freed. + */ +static inline int put_pte(struct hl_ctx *ctx, u64 hop_addr) +{ + struct pgt_info *pgt_info = get_pgt_info(ctx, hop_addr); + int num_of_ptes_left; + + pgt_info->num_of_ptes--; + + /* + * Need to save the number of ptes left because free_hop might free + * the pgt_info + */ + num_of_ptes_left = pgt_info->num_of_ptes; + if (!num_of_ptes_left) + free_hop(ctx, hop_addr); + + return num_of_ptes_left; +} + +static inline u64 get_hop0_addr(struct hl_ctx *ctx) +{ + return ctx->hdev->asic_prop.mmu_pgt_addr + + (ctx->asid * ctx->hdev->asic_prop.mmu_hop_table_size); +} + +static inline u64 get_hopN_pte_addr(struct hl_ctx *ctx, u64 hop_addr, + u64 virt_addr, u64 mask, u64 shift) +{ + return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * + ((virt_addr & mask) >> shift); +} + +static inline u64 get_hop0_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP0_MASK, HOP0_SHIFT); +} + +static inline u64 get_hop1_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP1_MASK, HOP1_SHIFT); +} + +static inline u64 get_hop2_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP2_MASK, HOP2_SHIFT); +} + +static inline u64 get_hop3_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP3_MASK, HOP3_SHIFT); +} + +static inline u64 get_hop4_pte_addr(struct hl_ctx *ctx, u64 hop_addr, u64 vaddr) +{ + return get_hopN_pte_addr(ctx, hop_addr, vaddr, HOP4_MASK, HOP4_SHIFT); +} + +static inline u64 get_next_hop_addr(u64 curr_pte) +{ + if (curr_pte & PAGE_PRESENT_MASK) + return curr_pte & PHYS_ADDR_MASK; + else + return ULLONG_MAX; +} + +static inline u64
get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, + bool *is_new_hop) +{ + u64 hop_addr = get_next_hop_addr(curr_pte); + + if (hop_addr == ULLONG_MAX) { + hop_addr = alloc_hop(ctx); + *is_new_hop = true; + } + + return hop_addr; +} + +/* + * hl_mmu_init - init the mmu module + * + * @hdev: pointer to the habanalabs device structure + * + * This function does the following: + * - Allocate max_asid zeroed hop0 pgts so no mapping is available + * - Enable mmu in hw + * - Invalidate the mmu cache + * - Create a pool of pages for pgts + * - Returns 0 on success + * + * This function depends on DMA QMAN to be working! + */ +int hl_mmu_init(struct hl_device *hdev) +{ + struct asic_fixed_properties *prop = &hdev->asic_prop; + int rc; + + if (!hdev->mmu_enable) + return 0; + + /* MMU HW init was already done in device hw_init() */ + + mutex_init(&hdev->mmu_cache_lock); + + hdev->mmu_pgt_pool = + gen_pool_create(__ffs(prop->mmu_hop_table_size), -1); + + if (!hdev->mmu_pgt_pool) { + dev_err(hdev->dev, "Failed to create page gen pool\n"); + rc = -ENOMEM; + goto err_pool_create; + } + + rc = gen_pool_add(hdev->mmu_pgt_pool, prop->mmu_pgt_addr + + prop->mmu_hop0_tables_total_size, + prop->mmu_pgt_size - prop->mmu_hop0_tables_total_size, + -1); + if (rc) { + dev_err(hdev->dev, "Failed to add memory to page gen pool\n"); + goto err_pool_add; + } + + return 0; + +err_pool_add: + gen_pool_destroy(hdev->mmu_pgt_pool); +err_pool_create: + mutex_destroy(&hdev->mmu_cache_lock); + + return rc; +} + +/* + * hl_mmu_fini - release the mmu module. + * + * @hdev: pointer to the habanalabs device structure + * + * This function does the following: + * - Disable mmu in hw + * - free the pgts pool + * + * All ctxs should be freed before calling this func + */ +void hl_mmu_fini(struct hl_device *hdev) +{ + if (!hdev->mmu_enable) + return; + + gen_pool_destroy(hdev->mmu_pgt_pool); + + mutex_destroy(&hdev->mmu_cache_lock); + + /* MMU HW fini will be done in device hw_fini() */ +} + +/* + * hl_mmu_ctx_init - init a ctx for using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Init a mutex to protect the concurrent mapping flow + * - Init a hash to hold all pgts related to this ctx + */ +void hl_mmu_ctx_init(struct hl_ctx *ctx) +{ + if (!ctx->hdev->mmu_enable) + return; + + mutex_init(&ctx->mmu_lock); + hash_init(ctx->mmu_hash); +} + +/* + * hl_mmu_ctx_fini - disable a ctx from using the mmu module + * + * @ctx: pointer to the context structure + * + * This function does the following: + * - Free any pgts which were not freed yet + * - Free the mutex + */ +void hl_mmu_ctx_fini(struct hl_ctx *ctx) +{ + struct pgt_info *pgt_info; + struct hlist_node *tmp; + int i; + + if (!ctx->hdev->mmu_enable) + return; + + if (!hash_empty(ctx->mmu_hash)) + dev_err(ctx->hdev->dev, + "ctx is freed while it has pgts in use\n"); + + hash_for_each_safe(ctx->mmu_hash, i, tmp, pgt_info, node) { + dev_err(ctx->hdev->dev, + "pgt_info of addr 0x%llx of asid %d was not destroyed, num_ptes: %d\n", + pgt_info->addr, ctx->asid, pgt_info->num_of_ptes); + free_hop(ctx, pgt_info->addr); + } + + mutex_destroy(&ctx->mmu_lock); +} + +static int _hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr) +{ + struct hl_device *hdev = ctx->hdev; + u64 hop0_addr = 0, hop0_pte_addr = 0, + hop1_addr = 0, hop1_pte_addr = 0, + hop2_addr = 0, hop2_pte_addr = 0, + hop3_addr = 0, hop3_pte_addr = 0, + hop4_addr = 0, hop4_pte_addr = 0, + curr_pte; + int clear_hop3 = 1; + + hop0_addr = get_hop0_addr(ctx); + + 
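+ /* hop0 always exists: one table per ASID is reserved at MMU init + * (see mmu_hop0_tables_total_size), so only hops 1-4 can be absent + */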
hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr); + + hop1_addr = get_next_hop_addr(curr_pte); + + if (hop1_addr == ULLONG_MAX) + goto not_mapped; + + hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr); + + hop2_addr = get_next_hop_addr(curr_pte); + + if (hop2_addr == ULLONG_MAX) + goto not_mapped; + + hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr); + + hop3_addr = get_next_hop_addr(curr_pte); + + if (hop3_addr == ULLONG_MAX) + goto not_mapped; + + hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr); + + if (!(curr_pte & LAST_MASK)) { + hop4_addr = get_next_hop_addr(curr_pte); + + if (hop4_addr == ULLONG_MAX) + goto not_mapped; + + hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr); + + curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr); + + clear_hop3 = 0; + } + + if (!(curr_pte & PAGE_PRESENT_MASK)) + goto not_mapped; + + clear_pte(hdev, hop4_addr ? hop4_pte_addr : hop3_pte_addr); + + if (hop4_addr && !put_pte(ctx, hop4_addr)) + clear_hop3 = 1; + + if (!clear_hop3) + goto flush; + clear_pte(hdev, hop3_pte_addr); + + if (put_pte(ctx, hop3_addr)) + goto flush; + clear_pte(hdev, hop2_pte_addr); + + if (put_pte(ctx, hop2_addr)) + goto flush; + clear_pte(hdev, hop1_pte_addr); + + if (put_pte(ctx, hop1_addr)) + goto flush; + clear_pte(hdev, hop0_pte_addr); + +flush: + /* flush all writes from all cores to reach PCI */ + mb(); + + hdev->asic_funcs->read_pte(hdev, + hop4_addr ? hop4_pte_addr : hop3_pte_addr); + + return 0; + +not_mapped: + dev_err(hdev->dev, "virt addr 0x%llx is not mapped to phys addr\n", + virt_addr); + + return -EINVAL; +} + +/* + * hl_mmu_unmap - unmaps a virtual addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to unmap + * @page_size: size of the page to unmap + * + * This function does the following: + * - Check that the virt addr is mapped + * - Unmap the virt addr and free pgts if possible + * - Returns 0 on success, -EINVAL if the given addr is not mapped + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it unmaps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + */ +int hl_mmu_unmap(struct hl_ctx *ctx, u64 virt_addr, u32 page_size) +{ + struct hl_device *hdev = ctx->hdev; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc; + + if (!hdev->mmu_enable) + return 0; + + /* + * The H/W handles mapping of 4KB/2MB pages. Hence if the host page size + * is bigger, we break it to sub-pages and unmap them separately.
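+ * For example, a 16MB host huge page is unmapped as eight separate 2MB + * device pages.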
+	 */
+	if ((page_size % PAGE_SIZE_2MB) == 0) {
+		real_page_size = PAGE_SIZE_2MB;
+	} else if ((page_size % PAGE_SIZE_4KB) == 0) {
+		real_page_size = PAGE_SIZE_4KB;
+	} else {
+		dev_err(hdev->dev,
+			"page size of %u is not 4KB nor 2MB aligned, can't unmap\n",
+			page_size);
+
+		return -EFAULT;
+	}
+
+	npages = page_size / real_page_size;
+	real_virt_addr = virt_addr;
+
+	for (i = 0 ; i < npages ; i++) {
+		rc = _hl_mmu_unmap(ctx, real_virt_addr);
+		if (rc)
+			return rc;
+
+		real_virt_addr += real_page_size;
+	}
+
+	return 0;
+}
+
+static int _hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr,
+		u32 page_size)
+{
+	struct hl_device *hdev = ctx->hdev;
+	u64 hop0_addr = 0, hop0_pte_addr = 0,
+		hop1_addr = 0, hop1_pte_addr = 0,
+		hop2_addr = 0, hop2_pte_addr = 0,
+		hop3_addr = 0, hop3_pte_addr = 0,
+		hop4_addr = 0, hop4_pte_addr = 0,
+		curr_pte = 0;
+	bool hop1_new = false, hop2_new = false, hop3_new = false,
+		hop4_new = false, is_huge;
+	int rc = -ENOMEM;
+
+	/*
+	 * This mapping function can map a 4KB/2MB page. For a 2MB page there
+	 * are only 3 hops rather than 4. Currently the DRAM allocation uses
+	 * 2MB pages only but user memory could have been allocated with one
+	 * of the two page sizes. Since this is common code for all three
+	 * cases, we need this huge page check.
+	 */
+	is_huge = page_size == PAGE_SIZE_2MB;
+
+	hop0_addr = get_hop0_addr(ctx);
+
+	hop0_pte_addr = get_hop0_pte_addr(ctx, hop0_addr, virt_addr);
+
+	curr_pte = hdev->asic_funcs->read_pte(hdev, hop0_pte_addr);
+
+	hop1_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop1_new);
+
+	if (hop1_addr == ULLONG_MAX)
+		goto err;
+
+	hop1_pte_addr = get_hop1_pte_addr(ctx, hop1_addr, virt_addr);
+
+	curr_pte = hdev->asic_funcs->read_pte(hdev, hop1_pte_addr);
+
+	hop2_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop2_new);
+
+	if (hop2_addr == ULLONG_MAX)
+		goto err;
+
+	hop2_pte_addr = get_hop2_pte_addr(ctx, hop2_addr, virt_addr);
+
+	curr_pte = hdev->asic_funcs->read_pte(hdev, hop2_pte_addr);
+
+	hop3_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop3_new);
+
+	if (hop3_addr == ULLONG_MAX)
+		goto err;
+
+	hop3_pte_addr = get_hop3_pte_addr(ctx, hop3_addr, virt_addr);
+
+	curr_pte = hdev->asic_funcs->read_pte(hdev, hop3_pte_addr);
+
+	if (!is_huge) {
+		hop4_addr = get_alloc_next_hop_addr(ctx, curr_pte, &hop4_new);
+
+		if (hop4_addr == ULLONG_MAX)
+			goto err;
+
+		hop4_pte_addr = get_hop4_pte_addr(ctx, hop4_addr, virt_addr);
+
+		curr_pte = hdev->asic_funcs->read_pte(hdev, hop4_pte_addr);
+	}
+
+	if (curr_pte & PAGE_PRESENT_MASK) {
+		dev_err(hdev->dev,
+			"mapping already exists for virt_addr 0x%llx\n",
+			virt_addr);
+
+		dev_dbg(hdev->dev, "hop0 pte: 0x%llx (0x%llx)\n",
+			hdev->asic_funcs->read_pte(hdev, hop0_pte_addr),
+			hop0_pte_addr);
+		dev_dbg(hdev->dev, "hop1 pte: 0x%llx (0x%llx)\n",
+			hdev->asic_funcs->read_pte(hdev, hop1_pte_addr),
+			hop1_pte_addr);
+		dev_dbg(hdev->dev, "hop2 pte: 0x%llx (0x%llx)\n",
+			hdev->asic_funcs->read_pte(hdev, hop2_pte_addr),
+			hop2_pte_addr);
+		dev_dbg(hdev->dev, "hop3 pte: 0x%llx (0x%llx)\n",
+			hdev->asic_funcs->read_pte(hdev, hop3_pte_addr),
+			hop3_pte_addr);
+
+		if (!is_huge)
+			dev_dbg(hdev->dev, "hop4 pte: 0x%llx (0x%llx)\n",
+				hdev->asic_funcs->read_pte(hdev,
+							hop4_pte_addr),
+				hop4_pte_addr);
+
+		rc = -EINVAL;
+		goto err;
+	}
+
+	curr_pte = (phys_addr & PTE_PHYS_ADDR_MASK) | LAST_MASK
+			| PAGE_PRESENT_MASK;
+
+	hdev->asic_funcs->write_pte(hdev,
+			is_huge ?
hop3_pte_addr : hop4_pte_addr, + curr_pte); + + if (hop1_new) { + curr_pte = (hop1_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop0_pte_addr, + curr_pte); + } + if (hop2_new) { + curr_pte = (hop2_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop1_pte_addr, + curr_pte); + get_pte(ctx, hop1_addr); + } + if (hop3_new) { + curr_pte = (hop3_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, hop2_pte_addr, + curr_pte); + get_pte(ctx, hop2_addr); + } + + if (!is_huge) { + if (hop4_new) { + curr_pte = (hop4_addr & PTE_PHYS_ADDR_MASK) | + PAGE_PRESENT_MASK; + ctx->hdev->asic_funcs->write_pte(ctx->hdev, + hop3_pte_addr, curr_pte); + get_pte(ctx, hop3_addr); + } + + get_pte(ctx, hop4_addr); + } else { + get_pte(ctx, hop3_addr); + } + + /* flush all writes from all cores to reach PCI */ + mb(); + + hdev->asic_funcs->read_pte(hdev, + is_huge ? hop3_pte_addr : hop4_pte_addr); + + return 0; + +err: + if (hop4_new) + free_hop(ctx, hop4_addr); + if (hop3_new) + free_hop(ctx, hop3_addr); + if (hop2_new) + free_hop(ctx, hop2_addr); + if (hop1_new) + free_hop(ctx, hop1_addr); + + return rc; +} + +/* + * hl_mmu_map - maps a virtual addr to physical addr + * + * @ctx: pointer to the context structure + * @virt_addr: virt addr to map from + * @phys_addr: phys addr to map to + * @page_size: physical page size + * + * This function does the following: + * - Check that the virt addr is not mapped + * - Allocate pgts as necessary in order to map the virt addr to the phys + * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM. + * + * Because this function changes the page tables in the device and because it + * changes the MMU hash, it must be protected by a lock. + * However, because it maps only a single page, the lock should be implemented + * in a higher level in order to protect the entire mapping of the memory area + */ +int hl_mmu_map(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size) +{ + struct hl_device *hdev = ctx->hdev; + u64 real_virt_addr; + u32 real_page_size, npages; + int i, rc, mapped_cnt = 0; + + if (!hdev->mmu_enable) + return 0; + + /* + * The H/W handles mapping of 4KB/2MB page. Hence if the host page size + * is bigger, we break it to sub-pages and map them separately. 
+	 */
+	if ((page_size % PAGE_SIZE_2MB) == 0) {
+		real_page_size = PAGE_SIZE_2MB;
+	} else if ((page_size % PAGE_SIZE_4KB) == 0) {
+		real_page_size = PAGE_SIZE_4KB;
+	} else {
+		dev_err(hdev->dev,
+			"page size of %u is not 4KB nor 2MB aligned, can't map\n",
+			page_size);
+
+		return -EFAULT;
+	}
+
+	npages = page_size / real_page_size;
+	real_virt_addr = virt_addr;
+
+	for (i = 0 ; i < npages ; i++) {
+		rc = _hl_mmu_map(ctx, real_virt_addr, phys_addr,
+				real_page_size);
+		if (rc)
+			goto err;
+
+		real_virt_addr += real_page_size;
+		/* advance the physical address as well, so consecutive
+		 * sub-pages do not all map to the same physical page
+		 */
+		phys_addr += real_page_size;
+		mapped_cnt++;
+	}
+
+	return 0;
+
+err:
+	real_virt_addr = virt_addr;
+	for (i = 0 ; i < mapped_cnt ; i++) {
+		if (_hl_mmu_unmap(ctx, real_virt_addr))
+			dev_warn_ratelimited(hdev->dev,
+				"failed to unmap va: 0x%llx\n", real_virt_addr);
+
+		real_virt_addr += real_page_size;
+	}
+
+	return rc;
+}
+
+/*
+ * hl_mmu_swap_out - marks all mappings of the given ctx as swapped out
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_out(struct hl_ctx *ctx)
+{
+
+}
+
+/*
+ * hl_mmu_swap_in - marks all mappings of the given ctx as swapped in
+ *
+ * @ctx: pointer to the context structure
+ *
+ */
+void hl_mmu_swap_in(struct hl_ctx *ctx)
+{
+
+}
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index fba49417f607..9015043887d1 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -162,6 +162,108 @@ union hl_wait_cs_args {
 	struct hl_wait_cs_out out;
 };
 
+/* Opcode to alloc device memory */
+#define HL_MEM_OP_ALLOC		0
+/* Opcode to free previously allocated device memory */
+#define HL_MEM_OP_FREE		1
+/* Opcode to map host memory */
+#define HL_MEM_OP_MAP		2
+/* Opcode to unmap previously mapped host memory */
+#define HL_MEM_OP_UNMAP		3
+
+/* Memory flags */
+#define HL_MEM_CONTIGUOUS	0x1
+#define HL_MEM_SHARED		0x2
+#define HL_MEM_USERPTR		0x4
+
+struct hl_mem_in {
+	union {
+		/* HL_MEM_OP_ALLOC - allocate device memory */
+		struct {
+			/* Size to alloc */
+			__u32 mem_size;
+			__u32 pad;
+		} alloc;
+
+		/* HL_MEM_OP_FREE - free device memory */
+		struct {
+			/* Handle returned from HL_MEM_OP_ALLOC */
+			__u64 handle;
+		} free;
+
+		/* HL_MEM_OP_MAP - map device memory */
+		struct {
+			/*
+			 * Requested virtual address of mapped memory.
+			 * KMD will try to map the requested region to this
+			 * hint address, as long as the address is valid and
+			 * not already mapped. The user should check the
+			 * returned address of the IOCTL to make sure he got
+			 * the hint address. Passing 0 here means that KMD
+			 * will choose the address itself.
+			 */
+			__u64 hint_addr;
+			/* Handle returned from HL_MEM_OP_ALLOC */
+			__u64 handle;
+		} map_device;
+
+		/* HL_MEM_OP_MAP - map host memory */
+		struct {
+			/* Address of allocated host memory */
+			__u64 host_virt_addr;
+			/*
+			 * Requested virtual address of mapped memory.
+			 * KMD will try to map the requested region to this
+			 * hint address, as long as the address is valid and
+			 * not already mapped. The user should check the
+			 * returned address of the IOCTL to make sure he got
+			 * the hint address. Passing 0 here means that KMD
+			 * will choose the address itself.
+			 */
+			__u64 hint_addr;
+			/* Size of allocated host memory */
+			__u32 mem_size;
+			__u32 pad;
+		} map_host;
+
+		/* HL_MEM_OP_UNMAP - unmap host memory */
+		struct {
+			/* Virtual address returned from HL_MEM_OP_MAP */
+			__u64 device_virt_addr;
+		} unmap;
+	};
+
+	/* HL_MEM_OP_* */
+	__u32 op;
+	/* HL_MEM_* flags */
+	__u32 flags;
+	/* Context ID - Currently not in use */
+	__u32 ctx_id;
+	__u32 pad;
+};
+
+struct hl_mem_out {
+	union {
+		/*
+		 * Used for HL_MEM_OP_MAP as the virtual address that was
+		 * assigned in the device VA space.
+		 * A value of 0 means the requested operation failed.
+		 */
+		__u64 device_virt_addr;
+
+		/*
+		 * Used for HL_MEM_OP_ALLOC. This is the assigned
+		 * handle for the allocated memory
+		 */
+		__u64 handle;
+	};
+};
+
+union hl_mem_args {
+	struct hl_mem_in in;
+	struct hl_mem_out out;
+};
+
 /*
  * Command Buffer
  * - Request a Command Buffer
@@ -245,7 +347,25 @@ union hl_wait_cs_args {
 #define HL_IOCTL_WAIT_CS \
 		_IOWR('H', 0x04, union hl_wait_cs_args)
 
+/*
+ * Memory
+ * - Map host memory to device MMU
+ * - Unmap host memory from device MMU
+ *
+ * This IOCTL allows the user to map host memory to the device MMU.
+ *
+ * For host memory, the IOCTL doesn't allocate memory. The user is supposed
+ * to allocate the memory in user-space (malloc/new). The driver pins the
+ * physical pages (up to the limit allowed by the OS), assigns a virtual
+ * address in the device VA space and initializes the device MMU.
+ *
+ * There is an option for the user to specify the requested virtual address.
+ *
+ */
+#define HL_IOCTL_MEMORY \
+		_IOWR('H', 0x05, union hl_mem_args)
+
 #define HL_COMMAND_START	0x02
-#define HL_COMMAND_END		0x05
+#define HL_COMMAND_END		0x06
 
 #endif /* HABANALABS_H_ */
-- cgit v1.2.3-71-gd317


From d8dd7b0a81cc192ef5d30ec76ed6f6d35a1a7cf5 Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Sat, 16 Feb 2019 00:39:23 +0200
Subject: habanalabs: implement INFO IOCTL

This patch implements the INFO IOCTL. That IOCTL is used to query
information the user needs in order to submit deep learning jobs to Goya.

The information is divided into several categories, such as H/W IP, events
that happened, DDR usage and more.
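As a rough, untested user-space sketch of how a caller might drive the new
interface (the include path and device node name below are assumptions, not
part of this patch), querying the H/W IP information could look like this:

/* Untested sketch: query H/W IP info via the INFO IOCTL.
 * "/dev/hl0" and <misc/habanalabs.h> are assumed; they depend on how the
 * driver and uapi headers are installed on the system.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/habanalabs.h>

int main(void)
{
	struct hl_info_hw_ip_info hw_ip;
	struct hl_info_args args;
	int fd = open("/dev/hl0", O_RDWR);	/* assumed node name */

	if (fd < 0)
		return 1;

	memset(&args, 0, sizeof(args));
	memset(&hw_ip, 0, sizeof(hw_ip));
	args.op = HL_INFO_HW_IP_INFO;
	args.return_pointer = (__u64) (uintptr_t) &hw_ip;
	/* return_size caps how many bytes the kernel may write back */
	args.return_size = sizeof(hw_ip);

	if (ioctl(fd, HL_IOCTL_INFO, &args)) {
		close(fd);
		return 1;
	}

	printf("device id 0x%x, dram size %llu\n", hw_ip.device_id,
	       (unsigned long long) hw_ip.dram_size);
	close(fd);
	return 0;
}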
Reviewed-by: Mike Rapoport Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/goya/goya.c | 6 ++ drivers/misc/habanalabs/habanalabs.h | 2 + drivers/misc/habanalabs/habanalabs_ioctl.c | 126 +++++++++++++++++++++++++++++ include/uapi/misc/habanalabs.h | 75 ++++++++++++++++- 4 files changed, 208 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index 89b82b989966..bf3f76f1aeae 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -5131,6 +5131,11 @@ static void goya_hw_queues_unlock(struct hl_device *hdev) spin_unlock(&goya->hw_queues_lock); } +static u32 goya_get_pci_id(struct hl_device *hdev) +{ + return hdev->pdev->device; +} + int goya_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size) { struct goya_device *goya = hdev->asic_specific; @@ -5232,6 +5237,7 @@ static const struct hl_asic_funcs goya_funcs = { .soft_reset_late_init = goya_soft_reset_late_init, .hw_queues_lock = goya_hw_queues_lock, .hw_queues_unlock = goya_hw_queues_unlock, + .get_pci_id = goya_get_pci_id, .get_eeprom_data = goya_get_eeprom_data, .send_cpu_message = goya_send_cpu_message, .get_hw_state = goya_get_hw_state diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 03085e7a12dd..02de4a2cab27 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -470,6 +470,7 @@ enum hl_pll_frequency { * @soft_reset_late_init: perform certain actions needed after soft reset. * @hw_queues_lock: acquire H/W queues lock. * @hw_queues_unlock: release H/W queues lock. + * @get_pci_id: retrieve PCI ID. * @get_eeprom_data: retrieve EEPROM data from F/W. * @send_cpu_message: send buffer to ArmCP. 
* @get_hw_state: retrieve the H/W state @@ -539,6 +540,7 @@ struct hl_asic_funcs { int (*soft_reset_late_init)(struct hl_device *hdev); void (*hw_queues_lock)(struct hl_device *hdev); void (*hw_queues_unlock)(struct hl_device *hdev); + u32 (*get_pci_id)(struct hl_device *hdev); int (*get_eeprom_data)(struct hl_device *hdev, void *data, size_t max_size); int (*send_cpu_message)(struct hl_device *hdev, u32 *msg, diff --git a/drivers/misc/habanalabs/habanalabs_ioctl.c b/drivers/misc/habanalabs/habanalabs_ioctl.c index 6e4dc5b5e696..12408d3302e9 100644 --- a/drivers/misc/habanalabs/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/habanalabs_ioctl.c @@ -12,10 +12,136 @@ #include #include +static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_ip_info hw_ip = {0}; + u32 size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 sram_kmd_size, dram_kmd_size; + + if ((!size) || (!out)) + return -EINVAL; + + sram_kmd_size = (prop->sram_user_base_address - + prop->sram_base_address); + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + + hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev); + hw_ip.sram_base_address = prop->sram_user_base_address; + hw_ip.dram_base_address = prop->dram_user_base_address; + hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask; + hw_ip.sram_size = prop->sram_size - sram_kmd_size; + hw_ip.dram_size = prop->dram_size - dram_kmd_size; + if (hw_ip.dram_size > 0) + hw_ip.dram_enabled = 1; + hw_ip.num_of_events = prop->num_of_events; + memcpy(hw_ip.armcp_version, + prop->armcp_info.armcp_version, VERSION_MAX_LEN); + hw_ip.armcp_cpld_version = prop->armcp_info.cpld_version; + hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr; + hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf; + hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od; + hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor; + + return copy_to_user(out, &hw_ip, + min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0; +} + +static int hw_events_info(struct hl_device *hdev, struct hl_info_args *args) +{ + u32 size, max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + void *arr; + + if ((!max_size) || (!out)) + return -EINVAL; + + arr = hdev->asic_funcs->get_events_stat(hdev, &size); + + return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0; +} + +static int dram_usage_info(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_dram_usage dram_usage = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + struct asic_fixed_properties *prop = &hdev->asic_prop; + u64 dram_kmd_size; + + if ((!max_size) || (!out)) + return -EINVAL; + + dram_kmd_size = (prop->dram_user_base_address - + prop->dram_base_address); + dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) - + atomic64_read(&hdev->dram_used_mem); + dram_usage.ctx_dram_mem = atomic64_read(&hdev->user_ctx->dram_phys_mem); + + return copy_to_user(out, &dram_usage, + min((size_t) max_size, sizeof(dram_usage))) ? 
-EFAULT : 0; +} + +static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) +{ + struct hl_info_hw_idle hw_idle = {0}; + u32 max_size = args->return_size; + void __user *out = (void __user *) (uintptr_t) args->return_pointer; + + if ((!max_size) || (!out)) + return -EINVAL; + + hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev); + + return copy_to_user(out, &hw_idle, + min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0; +} + +static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data) +{ + struct hl_info_args *args = data; + struct hl_device *hdev = hpriv->hdev; + int rc; + + if (hl_device_disabled_or_in_reset(hdev)) { + dev_err(hdev->dev, + "Device is disabled or in reset. Can't execute INFO IOCTL\n"); + return -EBUSY; + } + + switch (args->op) { + case HL_INFO_HW_IP_INFO: + rc = hw_ip_info(hdev, args); + break; + + case HL_INFO_HW_EVENTS: + rc = hw_events_info(hdev, args); + break; + + case HL_INFO_DRAM_USAGE: + rc = dram_usage_info(hdev, args); + break; + + case HL_INFO_HW_IDLE: + rc = hw_idle(hdev, args); + break; + + default: + dev_err(hdev->dev, "Invalid request %d\n", args->op); + rc = -ENOTTY; + break; + } + + return rc; +} + #define HL_IOCTL_DEF(ioctl, _func) \ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func} static const struct hl_ioctl_desc hl_ioctls[] = { + HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl), HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl), HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl), HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl), diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 9015043887d1..4afc1891ece8 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -45,6 +45,62 @@ enum goya_queue_id { GOYA_QUEUE_ID_SIZE }; +/* Opcode for management ioctl */ +#define HL_INFO_HW_IP_INFO 0 +#define HL_INFO_HW_EVENTS 1 +#define HL_INFO_DRAM_USAGE 2 +#define HL_INFO_HW_IDLE 3 + +#define HL_INFO_VERSION_MAX_LEN 128 + +struct hl_info_hw_ip_info { + __u64 sram_base_address; + __u64 dram_base_address; + __u64 dram_size; + __u32 sram_size; + __u32 num_of_events; + __u32 device_id; /* PCI Device ID */ + __u32 reserved[3]; + __u32 armcp_cpld_version; + __u32 psoc_pci_pll_nr; + __u32 psoc_pci_pll_nf; + __u32 psoc_pci_pll_od; + __u32 psoc_pci_pll_div_factor; + __u8 tpc_enabled_mask; + __u8 dram_enabled; + __u8 pad[2]; + __u8 armcp_version[HL_INFO_VERSION_MAX_LEN]; +}; + +struct hl_info_dram_usage { + __u64 dram_free_mem; + __u64 ctx_dram_mem; +}; + +struct hl_info_hw_idle { + __u32 is_idle; + __u32 pad; +}; + +struct hl_info_args { + /* Location of relevant struct in userspace */ + __u64 return_pointer; + /* + * The size of the return value. Just like "size" in "snprintf", + * it limits how many bytes the kernel can write + * + * For hw_events array, the size should be + * hl_info_hw_ip_info.num_of_events * sizeof(__u32) + */ + __u32 return_size; + + /* HL_INFO_* */ + __u32 op; + + /* Context ID - Currently not in use */ + __u32 ctx_id; + __u32 pad; +}; /* Opcode to create a new command buffer */ #define HL_CB_OP_CREATE 0 @@ -264,6 +320,23 @@ union hl_mem_args { struct hl_mem_out out; }; +/* + * Various information operations such as: + * - H/W IP information + * - Current dram usage + * + * The user calls this IOCTL with an opcode that describes the required + * information. The user should supply a pointer to a user-allocated memory + * chunk, which will be filled by the driver with the requested information. 
+ * The user supplies the maximum size to copy into the user's memory, in
+ * order to prevent data corruption in case of differences between the
+ * definitions of structures in kernel and userspace, e.g. in the case of an
+ * old userspace and a new kernel driver
+ */
+#define HL_IOCTL_INFO \
+		_IOWR('H', 0x01, struct hl_info_args)
+
 /*
  * Command Buffer
  * - Request a Command Buffer
@@ -365,7 +438,7 @@ union hl_mem_args {
 #define HL_IOCTL_MEMORY \
 		_IOWR('H', 0x05, union hl_mem_args)
 
-#define HL_COMMAND_START	0x02
+#define HL_COMMAND_START	0x01
 #define HL_COMMAND_END		0x06
 
 #endif /* HABANALABS_H_ */
-- cgit v1.2.3-71-gd317


From 35c0272502cca0a1b461d310c23aac94a503983d Mon Sep 17 00:00:00 2001
From: Ramalingam C
Date: Sat, 16 Feb 2019 10:34:59 +0530
Subject: drm/audio: declaration of struct device

The header references struct device without a definition or declaration,
which results in compilation warnings such as "'struct device' declared
inside parameter list..."

This change adds a declaration of struct device in the header to avoid any
such warnings.

Signed-off-by: Ramalingam C
cc: Takashi Iwai
cc: Daniel Vetter
Acked-by: Dave Airlie
Signed-off-by: Daniel Vetter
Link: https://patchwork.freedesktop.org/patch/msgid/1550293499-5560-1-git-send-email-ramalingam.c@intel.com
---
 include/drm/drm_audio_component.h | 1 +
 1 file changed, 1 insertion(+)

(limited to 'include')

diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
index 4923b00328c1..93a386be38fa 100644
--- a/include/drm/drm_audio_component.h
+++ b/include/drm/drm_audio_component.h
@@ -5,6 +5,7 @@
 #define _DRM_AUDIO_COMPONENT_H_
 
 struct drm_audio_component;
+struct device;
 
 /**
  * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
-- cgit v1.2.3-71-gd317


From f2db7361cb19bf3a6f7fd367f21d8eb325397946 Mon Sep 17 00:00:00 2001
From: Vishnu DASA
Date: Fri, 15 Feb 2019 16:32:47 +0000
Subject: VMCI: Support up to 64-bit PPNs

Add support in the VMCI driver to handle up to 64-bit PPNs when the VMCI
device exposes the capability for 64-bit PPNs.

Reviewed-by: Adit Ranadive
Reviewed-by: Jorgen Hansen
Signed-off-by: Vishnu Dasa
Signed-off-by: Greg Kroah-Hartman
---
 drivers/misc/vmw_vmci/vmci_doorbell.c   |  9 +++--
 drivers/misc/vmw_vmci/vmci_doorbell.h   |  2 +-
 drivers/misc/vmw_vmci/vmci_driver.h     |  2 ++
 drivers/misc/vmw_vmci/vmci_guest.c      | 39 ++++++++++++++++----
 drivers/misc/vmw_vmci/vmci_queue_pair.c | 63 +++++++++++++++------------------
 drivers/misc/vmw_vmci/vmci_queue_pair.h |  4 +--
 include/linux/vmw_vmci_defs.h           |  7 ++--
 7 files changed, 77 insertions(+), 49 deletions(-)

(limited to 'include')

diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.c b/drivers/misc/vmw_vmci/vmci_doorbell.c
index b3fa738ae005..7824c7494916 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.c
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.c
@@ -330,7 +330,7 @@ int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle)
 /*
  * Register the notification bitmap with the host.
 */
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn)
 {
 	int result;
 	struct vmci_notify_bm_set_msg bitmap_set_msg;
@@ -340,11 +340,14 @@ bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn)
 	bitmap_set_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
 	bitmap_set_msg.hdr.payload_size = sizeof(bitmap_set_msg) -
 	    VMCI_DG_HEADERSIZE;
-	bitmap_set_msg.bitmap_ppn = bitmap_ppn;
+	if (vmci_use_ppn64())
+		bitmap_set_msg.bitmap_ppn64 = bitmap_ppn;
+	else
+		bitmap_set_msg.bitmap_ppn32 = (u32) bitmap_ppn;
 
 	result = vmci_send_datagram(&bitmap_set_msg.hdr);
 	if (result != VMCI_SUCCESS) {
-		pr_devel("Failed to register (PPN=%u) as notification bitmap (error=%d)\n",
+		pr_devel("Failed to register (PPN=%llu) as notification bitmap (error=%d)\n",
 			 bitmap_ppn, result);
 		return false;
 	}
diff --git a/drivers/misc/vmw_vmci/vmci_doorbell.h b/drivers/misc/vmw_vmci/vmci_doorbell.h
index e4c0b17486a5..410a21f8436f 100644
--- a/drivers/misc/vmw_vmci/vmci_doorbell.h
+++ b/drivers/misc/vmw_vmci/vmci_doorbell.h
@@ -45,7 +45,7 @@ struct dbell_cpt_state {
 int vmci_dbell_host_context_notify(u32 src_cid, struct vmci_handle handle);
 int vmci_dbell_get_priv_flags(struct vmci_handle handle, u32 *priv_flags);
 
-bool vmci_dbell_register_notification_bitmap(u32 bitmap_ppn);
+bool vmci_dbell_register_notification_bitmap(u64 bitmap_ppn);
 void vmci_dbell_scan_notification_entries(u8 *bitmap);
 
 #endif /* VMCI_DOORBELL_H */
diff --git a/drivers/misc/vmw_vmci/vmci_driver.h b/drivers/misc/vmw_vmci/vmci_driver.h
index cee9e977d318..2fbf4a0ac657 100644
--- a/drivers/misc/vmw_vmci/vmci_driver.h
+++ b/drivers/misc/vmw_vmci/vmci_driver.h
@@ -54,4 +54,6 @@ void vmci_guest_exit(void);
 bool vmci_guest_code_active(void);
 u32 vmci_get_vm_context_id(void);
 
+bool vmci_use_ppn64(void);
+
 #endif /* _VMCI_DRIVER_H_ */
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index dad5abee656e..928708128177 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -64,6 +64,13 @@ struct vmci_guest_device {
 	dma_addr_t notification_base;
 };
 
+static bool use_ppn64;
+
+bool vmci_use_ppn64(void)
+{
+	return use_ppn64;
+}
+
 /* vmci_dev singleton device and supporting data */
 struct pci_dev *vmci_pdev;
 static struct vmci_guest_device *vmci_dev_g;
@@ -432,6 +439,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 	struct vmci_guest_device *vmci_dev;
 	void __iomem *iobase;
 	unsigned int capabilities;
+	unsigned int caps_in_use;
 	unsigned long cmd;
 	int vmci_err;
 	int error;
@@ -496,6 +504,23 @@ static int vmci_guest_probe_device(struct pci_dev *pdev,
 		error = -ENXIO;
 		goto err_free_data_buffer;
 	}
+	caps_in_use = VMCI_CAPS_DATAGRAM;
+
+	/*
+	 * Use 64-bit PPNs if the device supports them.
+	 *
+	 * There is no check for the return value of dma_set_mask_and_coherent
+	 * since this driver can handle the default mask values if
+	 * dma_set_mask_and_coherent fails.
+ */ + if (capabilities & VMCI_CAPS_PPN64) { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + use_ppn64 = true; + caps_in_use |= VMCI_CAPS_PPN64; + } else { + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + use_ppn64 = false; + } /* * If the hardware supports notifications, we will use that as @@ -510,14 +535,14 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, "Unable to allocate notification bitmap\n"); } else { memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE); - capabilities |= VMCI_CAPS_NOTIFICATIONS; + caps_in_use |= VMCI_CAPS_NOTIFICATIONS; } } - dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities); + dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use); /* Let the host know which capabilities we intend to use. */ - iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR); + iowrite32(caps_in_use, vmci_dev->iobase + VMCI_CAPS_ADDR); /* Set up global device so that we can start sending datagrams */ spin_lock_irq(&vmci_dev_spinlock); @@ -529,13 +554,13 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, * Register notification bitmap with device if that capability is * used. */ - if (capabilities & VMCI_CAPS_NOTIFICATIONS) { + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) { unsigned long bitmap_ppn = vmci_dev->notification_base >> PAGE_SHIFT; if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) { dev_warn(&pdev->dev, - "VMCI device unable to register notification bitmap with PPN 0x%x\n", - (u32) bitmap_ppn); + "VMCI device unable to register notification bitmap with PPN 0x%lx\n", + bitmap_ppn); error = -ENXIO; goto err_remove_vmci_dev_g; } @@ -611,7 +636,7 @@ static int vmci_guest_probe_device(struct pci_dev *pdev, /* Enable specific interrupt bits. */ cmd = VMCI_IMR_DATAGRAM; - if (capabilities & VMCI_CAPS_NOTIFICATIONS) + if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) cmd |= VMCI_IMR_NOTIFICATION; iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index 264f4ed8eef2..f5f1aac9d163 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -435,8 +435,8 @@ static int qp_alloc_ppn_set(void *prod_q, void *cons_q, u64 num_consume_pages, struct ppn_set *ppn_set) { - u32 *produce_ppns; - u32 *consume_ppns; + u64 *produce_ppns; + u64 *consume_ppns; struct vmci_queue *produce_q = prod_q; struct vmci_queue *consume_q = cons_q; u64 i; @@ -462,31 +462,13 @@ static int qp_alloc_ppn_set(void *prod_q, return VMCI_ERROR_NO_MEM; } - for (i = 0; i < num_produce_pages; i++) { - unsigned long pfn; - + for (i = 0; i < num_produce_pages; i++) produce_ppns[i] = produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; - pfn = produce_ppns[i]; - - /* Fail allocation if PFN isn't supported by hypervisor. */ - if (sizeof(pfn) > sizeof(*produce_ppns) - && pfn != produce_ppns[i]) - goto ppn_error; - } - - for (i = 0; i < num_consume_pages; i++) { - unsigned long pfn; + for (i = 0; i < num_consume_pages; i++) consume_ppns[i] = consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT; - pfn = consume_ppns[i]; - - /* Fail allocation if PFN isn't supported by hypervisor. 
 */
-		if (sizeof(pfn) > sizeof(*consume_ppns)
-		    && pfn != consume_ppns[i])
-			goto ppn_error;
-	}
 
 	ppn_set->num_produce_pages = num_produce_pages;
 	ppn_set->num_consume_pages = num_consume_pages;
@@ -494,11 +476,6 @@ static int qp_alloc_ppn_set(void *prod_q,
 	ppn_set->consume_ppns = consume_ppns;
 	ppn_set->initialized = true;
 	return VMCI_SUCCESS;
-
- ppn_error:
-	kfree(produce_ppns);
-	kfree(consume_ppns);
-	return VMCI_ERROR_INVALID_ARGS;
 }
 
 /*
@@ -520,12 +497,28 @@ static void qp_free_ppn_set(struct ppn_set *ppn_set)
  */
 static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
 {
-	memcpy(call_buf, ppn_set->produce_ppns,
-	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns));
-	memcpy(call_buf +
-	       ppn_set->num_produce_pages * sizeof(*ppn_set->produce_ppns),
-	       ppn_set->consume_ppns,
-	       ppn_set->num_consume_pages * sizeof(*ppn_set->consume_ppns));
+	if (vmci_use_ppn64()) {
+		memcpy(call_buf, ppn_set->produce_ppns,
+		       ppn_set->num_produce_pages *
+		       sizeof(*ppn_set->produce_ppns));
+		memcpy(call_buf +
+		       ppn_set->num_produce_pages *
+		       sizeof(*ppn_set->produce_ppns),
+		       ppn_set->consume_ppns,
+		       ppn_set->num_consume_pages *
+		       sizeof(*ppn_set->consume_ppns));
+	} else {
+		int i;
+		u32 *ppns = (u32 *) call_buf;
+
+		for (i = 0; i < ppn_set->num_produce_pages; i++)
+			ppns[i] = (u32) ppn_set->produce_ppns[i];
+
+		ppns = &ppns[ppn_set->num_produce_pages];
+
+		for (i = 0; i < ppn_set->num_consume_pages; i++)
+			ppns[i] = (u32) ppn_set->consume_ppns[i];
+	}
 
 	return VMCI_SUCCESS;
 }
@@ -951,13 +944,15 @@ static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
 {
 	struct vmci_qp_alloc_msg *alloc_msg;
 	size_t msg_size;
+	size_t ppn_size;
 	int result;
 
 	if (!entry || entry->num_ppns <= 2)
 		return VMCI_ERROR_INVALID_ARGS;
 
+	ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
 	msg_size = sizeof(*alloc_msg) +
-		(size_t) entry->num_ppns * sizeof(u32);
+		(size_t) entry->num_ppns * ppn_size;
 	alloc_msg = kmalloc(msg_size, GFP_KERNEL);
 	if (!alloc_msg)
 		return VMCI_ERROR_NO_MEM;
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.h b/drivers/misc/vmw_vmci/vmci_queue_pair.h
index ed177f04ef24..46c0b6c7bafb 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.h
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.h
@@ -28,8 +28,8 @@ typedef int (*vmci_event_release_cb) (void *client_data);
 struct ppn_set {
 	u64 num_produce_pages;
 	u64 num_consume_pages;
-	u32 *produce_ppns;
-	u32 *consume_ppns;
+	u64 *produce_ppns;
+	u64 *consume_ppns;
 	bool initialized;
 };
 
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h
index b724ef7005de..eaa1e762bf06 100644
--- a/include/linux/vmw_vmci_defs.h
+++ b/include/linux/vmw_vmci_defs.h
@@ -45,6 +45,7 @@
 #define VMCI_CAPS_GUESTCALL     0x2
 #define VMCI_CAPS_DATAGRAM      0x4
 #define VMCI_CAPS_NOTIFICATIONS 0x8
+#define VMCI_CAPS_PPN64         0x10
 
 /* Interrupt Cause register bits. */
 #define VMCI_ICR_DATAGRAM      0x1
@@ -569,8 +570,10 @@ struct vmci_resource_query_msg {
  */
 struct vmci_notify_bm_set_msg {
 	struct vmci_datagram hdr;
-	u32 bitmap_ppn;
-	u32 _pad;
+	union {
+		u32 bitmap_ppn32;
+		u64 bitmap_ppn64;
+	};
 };
 
 /*
-- cgit v1.2.3-71-gd317


From 230afe74d139f37ba5e344ad4e53d65911d12188 Mon Sep 17 00:00:00 2001
From: Oded Gabbay
Date: Wed, 27 Feb 2019 00:19:18 +0200
Subject: habanalabs: allow memory allocations larger than 4GB

This patch increases the size field in the uapi structure of the Memory
IOCTL from 32-bit to 64-bit. This allows the user to allocate and/or map
memory in chunks that are larger than 4GB.
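For illustration only, an untested sketch of what such a request could look
like from user-space once the field is widened (fd is assumed to be an open
habanalabs device file descriptor, and the include path is an assumption):

/* Untested sketch: request a device-memory allocation larger than 4GB,
 * which could not be expressed with the old __u32 mem_size field.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int alloc_8gb(int fd, __u64 *handle)
{
	union hl_mem_args args;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_MEM_OP_ALLOC;
	args.in.alloc.mem_size = 8ULL << 30;	/* 8GB, needs 64 bits */

	if (ioctl(fd, HL_IOCTL_MEMORY, &args))
		return -1;

	*handle = args.out.handle;	/* pass to HL_MEM_OP_FREE later */
	return 0;
}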
Goya's device memory (DRAM) can be up to 16GB, and for certain topologies, the user may want an allocation that is larger than 4GB. This change doesn't break current user-space because there was a "pad" field in the uapi structure right after the size field. Changing the size field to be 64-bit and removing the pad field maintains compatibility with current user-space. Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- drivers/misc/habanalabs/habanalabs.h | 2 +- drivers/misc/habanalabs/memory.c | 10 ++++------ include/uapi/misc/habanalabs.h | 6 ++---- 3 files changed, 7 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/drivers/misc/habanalabs/habanalabs.h b/drivers/misc/habanalabs/habanalabs.h index 901542d685e8..fdf517448599 100644 --- a/drivers/misc/habanalabs/habanalabs.h +++ b/drivers/misc/habanalabs/habanalabs.h @@ -1320,7 +1320,7 @@ void hl_vm_ctx_fini(struct hl_ctx *ctx); int hl_vm_init(struct hl_device *hdev); void hl_vm_fini(struct hl_device *hdev); -int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, struct hl_userptr *userptr); int hl_unpin_host_memory(struct hl_device *hdev, struct hl_userptr *userptr); void hl_userptr_delete_list(struct hl_device *hdev, diff --git a/drivers/misc/habanalabs/memory.c b/drivers/misc/habanalabs/memory.c index 9e3491dc3b55..4b57d7ce50dd 100644 --- a/drivers/misc/habanalabs/memory.c +++ b/drivers/misc/habanalabs/memory.c @@ -1210,7 +1210,7 @@ out: * - Pins the physical pages * - Create a SG list from those pages */ -int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, +int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u64 size, struct hl_userptr *userptr) { u64 start, end; @@ -1218,14 +1218,12 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, int rc; if (!size) { - dev_err(hdev->dev, "size to pin is invalid - %d\n", - size); + dev_err(hdev->dev, "size to pin is invalid - %llu\n", size); return -EINVAL; } if (!access_ok((void __user *) (uintptr_t) addr, size)) { - dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", - addr); + dev_err(hdev->dev, "user pointer is invalid - 0x%llx\n", addr); return -EFAULT; } @@ -1236,7 +1234,7 @@ int hl_pin_host_memory(struct hl_device *hdev, u64 addr, u32 size, if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size)) { dev_err(hdev->dev, - "user pointer 0x%llx + %u causes integer overflow\n", + "user pointer 0x%llx + %llu causes integer overflow\n", addr, size); return -EINVAL; } diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 4afc1891ece8..23d6ad3459cb 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -237,8 +237,7 @@ struct hl_mem_in { /* HL_MEM_OP_ALLOC- allocate device memory */ struct { /* Size to alloc */ - __u32 mem_size; - __u32 pad; + __u64 mem_size; } alloc; /* HL_MEM_OP_FREE - free device memory */ @@ -278,8 +277,7 @@ struct hl_mem_in { */ __u64 hint_addr; /* Size of allocated host memory */ - __u32 mem_size; - __u32 pad; + __u64 mem_size; } map_host; /* HL_MEM_OP_UNMAP - unmap host memory */ -- cgit v1.2.3-71-gd317 From 541664d360d1fdaa116473410265b2cb8a806b50 Mon Sep 17 00:00:00 2001 From: Oded Gabbay Date: Thu, 28 Feb 2019 11:55:44 +0200 Subject: habanalabs: add comments in uapi/misc/habanalabs.h Add comment about minimum and maximum size of command buffer. Add some text about the expected input of CS IOCTL. 
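As an untested illustration of the size rule being documented (union
hl_cb_args, its out.cb_handle field and HL_IOCTL_CB are assumed from the
full uapi header, which this series only shows in part), creating a command
buffer within those limits might look like:

/* Untested sketch: create a command buffer, respecting the documented
 * size rule: at most 2MB, and at least PAGE_SIZE is allocated regardless
 * of the requested size.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

static int create_cb(int fd, __u32 size, __u64 *cb_handle)
{
	union hl_cb_args args;	/* assumed type from the full header */

	if (size > 2 * 1024 * 1024)	/* larger than 2MB is rejected */
		return -1;

	memset(&args, 0, sizeof(args));
	args.in.op = HL_CB_OP_CREATE;
	args.in.cb_size = size;	/* smaller requests still get PAGE_SIZE */

	if (ioctl(fd, HL_IOCTL_CB, &args))
		return -1;

	*cb_handle = args.out.cb_handle;
	return 0;
}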
Signed-off-by: Oded Gabbay Signed-off-by: Greg Kroah-Hartman --- include/uapi/misc/habanalabs.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h index 23d6ad3459cb..7fd6f633534c 100644 --- a/include/uapi/misc/habanalabs.h +++ b/include/uapi/misc/habanalabs.h @@ -112,7 +112,9 @@ struct hl_cb_in { __u64 cb_handle; /* HL_CB_OP_* */ __u32 op; - /* Size of CB. Minimum requested size must be PAGE_SIZE */ + /* Size of CB. Maximum size is 2MB. The minimum size that will be + * allocated, regardless of this parameter's value, is PAGE_SIZE + */ __u32 cb_size; /* Context ID - Currently not in use */ __u32 ctx_id; @@ -364,6 +366,12 @@ union hl_mem_args { * internal. The driver will get completion notifications from the device only * on JOBS which are enqueued in the external queues. * + * For jobs on external queues, the user needs to create command buffers + * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on + * internal queues, the user needs to prepare a "command buffer" with packets + * on either the SRAM or DRAM, and give the device address of that buffer to + * the CS ioctl. + * * This IOCTL is asynchronous in regard to the actual execution of the CS. This * means it returns immediately after ALL the JOBS were enqueued on their * relevant queues. Therefore, the user mustn't assume the CS has been completed -- cgit v1.2.3-71-gd317
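Finally, a small compile-time sketch of why the earlier mem_size widening
keeps the uapi layout stable (the struct names here are illustrative
stand-ins, and the value-compatibility argument assumes the usual
little-endian, 8-byte-aligned ABI):

#include <assert.h>
#include <linux/types.h>

/* Illustrative stand-ins for the old and new 'alloc' argument layouts */
struct old_alloc { __u32 mem_size; __u32 pad; };
struct new_alloc { __u64 mem_size; };

/* Same size at the same offset, so a binary built against the old layout
 * (which zeroed 'pad') presents the same 64-bit value to a new kernel on
 * a little-endian ABI.
 */
static_assert(sizeof(struct old_alloc) == sizeof(struct new_alloc),
	      "widened mem_size must not change the uapi layout");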