cachepc-linux

Fork of AMDESE/linux with modifications for the CachePC side-channel attack
git clone https://git.sinitax.com/sinitax/cachepc-linux

smp.c (16484B)


// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];   /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

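/*
 * Send an IPI of the given type to every CPU in @mask. If the Cluster Power
 * Controller is present, also power up any target core that is not yet in
 * cpu_coherent_mask so it can receive the interrupt.
 */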
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}


static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

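/*
 * Reserve the call-function and reschedule IPIs from the IPI IRQ domain for
 * the CPUs in @mask and install their handlers.
 */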
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs so fail
	 * loudly if that is the case. Otherwise simply return, skipping IPI
	 * setup, if we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}

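/*
 * Release the IPIs reserved by mips_smp_ipi_allocate() for the CPUs in @mask.
 */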
int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}


static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(), enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

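/*
 * Bring up a secondary CPU: ask the platform code to boot it, wait for it to
 * reach start_secondary(), synchronise the counters, then wait for it to mark
 * itself online.
 */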
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (eg debuggers doing the flushes on
 * behalf of debugees, kswapd stealing pages from another process etc).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

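/*
 * Flush the TLB entries covering [start, end) of vma's address space on all
 * CPUs: with a global invalidate (ginvt) when MMIDs are in use, otherwise by
 * IPI or by invalidating the other CPUs' contexts.
 */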
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush icache if
			 * the VMA is executable, otherwise we must invalidate
			 * ASID without it appearing to has_valid_asid() as if
			 * mm has been completely unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

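/*
 * Flush the TLB entry for a single page of vma's address space, using a
 * global invalidate when MMIDs are available and IPIs or context
 * invalidation otherwise.
 */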
void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate ASID without it appearing to
			 * has_valid_asid() as if mm has been completely unused
			 * by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);

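/* Deliver a broadcast tick to each CPU in @mask via an async CSD call. */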
void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */