Skip to content
Snippets Groups Projects
Select Git revision
  • f1f0f330b1d0ac1bcc38d7c84d439f4fde341a9c
  • master default
  • b4/phy-realtek-clock-fix
  • b4/rk3576-rock4d-phy-timings
  • b4/dw-wdt-fix-initial-timeout
  • radxa-v6.1-vendor-kernel
  • b4/fusb302-race-condition-fix
  • b4/rk3576-rock4d-phy-handling-fixes
  • b4/rk3588-evb1-hdmi-rx
  • b4/rk3576-fix-fspi-pmdomain
  • b4/usbc-for-rock5bp
  • b4/rock5bp-for-upstream
  • rockchip-devel
  • rk3588-test
  • rk3588-test-vendor-cam
  • lf-6.6.y_6.6.23-2.0.0_var01-panfrost
  • rk3588-linked-clk-gate-for-upstream
  • rk3588-gpu-pwr-domain-for-upstream
  • rk3588-rock5b-usbc-for-upstream
  • rk3588-evb1-for-upstream
  • imx95-upstream-with-vendor-display-stack
  • v5.17
  • v5.17-rc8
  • v5.17-rc7
  • v5.17-rc6
  • v5.17-rc5
  • v5.17-rc4
  • v5.17-rc3
  • v5.17-rc2
  • v5.17-rc1
  • v5.16
  • v5.16-rc8
  • v5.16-rc7
  • v5.16-rc6
  • v5.16-rc5
  • v5.16-rc4
  • v5.16-rc3
  • v5.16-rc2
  • v5.16-rc1
  • v5.15
  • v5.15-rc7
41 results

bucket_locks.c

Blame
  • bucket_locks.c 1.40 KiB
    #include <linux/export.h>
    #include <linux/kernel.h>
    #include <linux/log2.h>
    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>
    
    /* Allocate an array of spinlocks to be indexed by a hash value. Two of the
     * arguments control how many locks are allocated: max_size gives the upper
     * bound on the number of locks, and cpu_mult gives the number of locks to
     * allocate per possible CPU. The size is rounded up to a power of 2 so
     * that (size - 1) can be used as a hash mask.
     */
    
    /*
     * __alloc_bucket_spinlocks - allocate a hashed array of spinlocks
     * @locks:      out: the allocated lock array (NULL if spinlock_t is empty)
     * @locks_mask: out: mask (size - 1) for hashing into the array
     * @max_size:   maximum number of locks; NOTE(review): assumed to be a
     *              power of 2 (as rhashtable passes) so the mask stays valid
     * @cpu_mult:   locks per possible CPU, or 0 to allocate max_size locks
     * @gfp:        allocation flags passed to kvmalloc_array()
     * @name:       lockdep class name for the per-array lock class
     * @key:        lockdep class key (supplied by the alloc_bucket_spinlocks()
     *              wrapper macro)
     *
     * Returns 0 on success or -ENOMEM if the array could not be allocated.
     * Callers release the array with free_bucket_spinlocks().
     */
    int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *locks_mask,
    			     size_t max_size, unsigned int cpu_mult, gfp_t gfp,
    			     const char *name, struct lock_class_key *key)
    {
    	spinlock_t *tlocks = NULL;
    	unsigned int i, size;
    #if defined(CONFIG_PROVE_LOCKING)
    	/* Lock proving is expensive; keep the table tiny under lockdep. */
    	unsigned int nr_pcpus = 2;
    #else
    	unsigned int nr_pcpus = num_possible_cpus();
    #endif
    
    	if (cpu_mult) {
    		/* Never scale past 64 CPUs' worth of locks. */
    		nr_pcpus = min_t(unsigned int, nr_pcpus, 64UL);
    		/*
    		 * Round the per-CPU product up to a power of 2 before
    		 * clamping to max_size: the result feeds the (size - 1)
    		 * hash mask below, which is only correct for power-of-2
    		 * sizes. (A CPU count such as 6 would otherwise yield a
    		 * mask like 0b101 that skips lock slots.)
    		 */
    		size = min_t(unsigned int,
    			     roundup_pow_of_two(nr_pcpus * cpu_mult),
    			     max_size);
    	} else {
    		size = max_size;
    	}
    
    	/* spinlock_t is zero-sized on !SMP && !DEBUG builds: nothing to do. */
    	if (sizeof(spinlock_t) != 0) {
    		tlocks = kvmalloc_array(size, sizeof(spinlock_t), gfp);
    		if (!tlocks)
    			return -ENOMEM;
    		for (i = 0; i < size; i++) {
    			spin_lock_init(&tlocks[i]);
    			/* Give every array its own lockdep class. */
    			lockdep_init_map(&tlocks[i].dep_map, name, key, 0);
    		}
    	}
    
    	*locks = tlocks;
    	*locks_mask = size - 1;
    
    	return 0;
    }
    EXPORT_SYMBOL(__alloc_bucket_spinlocks);
    
    /*
     * free_bucket_spinlocks - release a lock array from alloc_bucket_spinlocks()
     * @locks: the array to free; NULL is allowed (kvfree() ignores it, which
     *         covers the zero-sized-spinlock_t case where no array was made)
     *
     * kvfree() handles both the kmalloc and vmalloc paths kvmalloc_array()
     * may have taken.
     */
    void free_bucket_spinlocks(spinlock_t *locks)
    {
    	kvfree(locks);
    }
    EXPORT_SYMBOL(free_bucket_spinlocks);