Skip to content
Snippets Groups Projects
Select Git revision
  • 2948a1fcd77a8bb11604387e3fc52f0ebf5729e9
  • vme-testing default
  • ci-test
  • master
  • remoteproc
  • am625-sk-ov5640
  • pcal6534-upstreaming
  • lps22df-upstreaming
  • msc-upstreaming
  • imx8mp
  • iio/noa1305
  • vme-next
  • vme-next-4.14-rc4
  • v4.14-rc4
  • v4.14-rc3
  • v4.14-rc2
  • v4.14-rc1
  • v4.13
  • vme-next-4.13-rc7
  • v4.13-rc7
  • v4.13-rc6
  • v4.13-rc5
  • v4.13-rc4
  • v4.13-rc3
  • v4.13-rc2
  • v4.13-rc1
  • v4.12
  • v4.12-rc7
  • v4.12-rc6
  • v4.12-rc5
  • v4.12-rc4
  • v4.12-rc3
32 results

socket.c

Blame
    • Jon Maloy's avatar
      2948a1fc
      tipc: fix uninitialized skb list crash · 2948a1fc
      Jon Maloy authored
      
      Our test suite sometimes provokes the following crash:
      
      Description of problem:
      [ 1092.597234] BUG: unable to handle kernel NULL pointer dereference at 00000000000000e8
      [ 1092.605072] PGD 0 P4D 0
      [ 1092.607620] Oops: 0000 [#1] SMP PTI
      [ 1092.611118] CPU: 37 PID: 0 Comm: swapper/37 Kdump: loaded Not tainted 4.18.0-122.el8.x86_64 #1
      [ 1092.619724] Hardware name: Dell Inc. PowerEdge R740/08D89F, BIOS 1.3.7 02/08/2018
      [ 1092.627215] RIP: 0010:tipc_mcast_filter_msg+0x93/0x2d0 [tipc]
      [ 1092.632955] Code: 0f 84 aa 01 00 00 89 cf 4d 01 ca 4c 8b 26 c1 ef 19 83 e7 0f 83 ff 0c 4d 0f 45 d1 41 8b 6a 10 0f cd 4c 39 e6 0f 84 81 01 00 00 <4d> 8b 9c 24 e8 00 00 00 45 8b 13 41 0f ca 44 89 d7 c1 ef 13 83 e7
      [ 1092.651703] RSP: 0018:ffff929e5fa83a18 EFLAGS: 00010282
      [ 1092.656927] RAX: ffff929e3fb38100 RBX: 00000000069f29ee RCX: 00000000416c0045
      [ 1092.664058] RDX: ffff929e5fa83a88 RSI: ffff929e31a28420 RDI: 0000000000000000
      [ 1092.671209] RBP: 0000000029b11821 R08: 0000000000000000 R09: ffff929e39b4407a
      [ 1092.678343] R10: ffff929e39b4407a R11: 0000000000000007 R12: 0000000000000000
      [ 1092.685475] R13: 0000000000000001 R14: ffff929e3fb38100 R15: ffff929e39b4407a
      [ 1092.692614] FS:  0000000000000000(0000) GS:ffff929e5fa80000(0000) knlGS:0000000000000000
      [ 1092.700702] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
      [ 1092.706447] CR2: 00000000000000e8 CR3: 000000031300a004 CR4: 00000000007606e0
      [ 1092.713579] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
      [ 1092.720712] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
      [ 1092.727843] PKRU: 55555554
      [ 1092.730556] Call Trace:
      [ 1092.733010]  <IRQ>
      [ 1092.735034]  tipc_sk_filter_rcv+0x7ca/0xb80 [tipc]
      [ 1092.739828]  ? __kmalloc_node_track_caller+0x1cb/0x290
      [ 1092.744974]  ? dev_hard_start_xmit+0xa5/0x210
      [ 1092.749332]  tipc_sk_rcv+0x389/0x640 [tipc]
      [ 1092.753519]  tipc_sk_mcast_rcv+0x23c/0x3a0 [tipc]
      [ 1092.758224]  tipc_rcv+0x57a/0xf20 [tipc]
      [ 1092.762154]  ? ktime_get_real_ts64+0x40/0xe0
      [ 1092.766432]  ? tpacket_rcv+0x50/0x9f0
      [ 1092.770098]  tipc_l2_rcv_msg+0x4a/0x70 [tipc]
      [ 1092.774452]  __netif_receive_skb_core+0xb62/0xbd0
      [ 1092.779164]  ? enqueue_entity+0xf6/0x630
      [ 1092.783084]  ? kmem_cache_alloc+0x158/0x1c0
      [ 1092.787272]  ? __build_skb+0x25/0xd0
      [ 1092.790849]  netif_receive_skb_internal+0x42/0xf0
      [ 1092.795557]  napi_gro_receive+0xba/0xe0
      [ 1092.799417]  mlx5e_handle_rx_cqe+0x83/0xd0 [mlx5_core]
      [ 1092.804564]  mlx5e_poll_rx_cq+0xd5/0x920 [mlx5_core]
      [ 1092.809536]  mlx5e_napi_poll+0xb2/0xce0 [mlx5_core]
      [ 1092.814415]  ? __wake_up_common_lock+0x89/0xc0
      [ 1092.818861]  net_rx_action+0x149/0x3b0
      [ 1092.822616]  __do_softirq+0xe3/0x30a
      [ 1092.826193]  irq_exit+0x100/0x110
      [ 1092.829512]  do_IRQ+0x85/0xd0
      [ 1092.832483]  common_interrupt+0xf/0xf
      [ 1092.836147]  </IRQ>
      [ 1092.838255] RIP: 0010:cpuidle_enter_state+0xb7/0x2a0
      [ 1092.843221] Code: e8 3e 79 a5 ff 80 7c 24 03 00 74 17 9c 58 0f 1f 44 00 00 f6 c4 02 0f 85 d7 01 00 00 31 ff e8 a0 6b ab ff fb 66 0f 1f 44 00 00 <48> b8 ff ff ff ff f3 01 00 00 4c 29 f3 ba ff ff ff 7f 48 39 c3 7f
      [ 1092.861967] RSP: 0018:ffffaa5ec6533e98 EFLAGS: 00000246 ORIG_RAX: ffffffffffffffdd
      [ 1092.869530] RAX: ffff929e5faa3100 RBX: 000000fe63dd2092 RCX: 000000000000001f
      [ 1092.876665] RDX: 000000fe63dd2092 RSI: 000000003a518aaa RDI: 0000000000000000
      [ 1092.883795] RBP: 0000000000000003 R08: 0000000000000004 R09: 0000000000022940
      [ 1092.890929] R10: 0000040cb0666b56 R11: ffff929e5faa20a8 R12: ffff929e5faade78
      [ 1092.898060] R13: ffffffffb59258f8 R14: 000000fe60f3228d R15: 0000000000000000
      [ 1092.905196]  ? cpuidle_enter_state+0x92/0x2a0
      [ 1092.909555]  do_idle+0x236/0x280
      [ 1092.912785]  cpu_startup_entry+0x6f/0x80
      [ 1092.916715]  start_secondary+0x1a7/0x200
      [ 1092.920642]  secondary_startup_64+0xb7/0xc0
      [...]
      
      The reason is that the skb list tipc_socket::mc_method.deferredq only
      is initialized for connectionless sockets, while nothing stops arriving
      multicast messages from being filtered by connection oriented sockets,
      with subsequent access to the said list.
      
      We fix this by initializing the list unconditionally at socket creation.
      This eliminates the crash, while the message still is dropped further
      down in tipc_sk_filter_rcv() as it should be.
      
      Reported-by: Li Shuang <shuali@redhat.com>
      Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
      Reviewed-by: Xin Long <lucien.xin@gmail.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
      2948a1fc
      History
      tipc: fix uninitialized skb list crash
      Jon Maloy authored
      
      Our test suite sometimes provokes the following crash:
      
      Description of problem:
      [ 1092.597234] BUG: unable to handle kernel NULL pointer dereference at 00000000000000e8
      [ 1092.605072] PGD 0 P4D 0
      [ 1092.607620] Oops: 0000 [#1] SMP PTI
      [ 1092.611118] CPU: 37 PID: 0 Comm: swapper/37 Kdump: loaded Not tainted 4.18.0-122.el8.x86_64 #1
      [ 1092.619724] Hardware name: Dell Inc. PowerEdge R740/08D89F, BIOS 1.3.7 02/08/2018
      [ 1092.627215] RIP: 0010:tipc_mcast_filter_msg+0x93/0x2d0 [tipc]
      [ 1092.632955] Code: 0f 84 aa 01 00 00 89 cf 4d 01 ca 4c 8b 26 c1 ef 19 83 e7 0f 83 ff 0c 4d 0f 45 d1 41 8b 6a 10 0f cd 4c 39 e6 0f 84 81 01 00 00 <4d> 8b 9c 24 e8 00 00 00 45 8b 13 41 0f ca 44 89 d7 c1 ef 13 83 e7
      [ 1092.651703] RSP: 0018:ffff929e5fa83a18 EFLAGS: 00010282
      [ 1092.656927] RAX: ffff929e3fb38100 RBX: 00000000069f29ee RCX: 00000000416c0045
      [ 1092.664058] RDX: ffff929e5fa83a88 RSI: ffff929e31a28420 RDI: 0000000000000000
      [ 1092.671209] RBP: 0000000029b11821 R08: 0000000000000000 R09: ffff929e39b4407a
      [ 1092.678343] R10: ffff929e39b4407a R11: 0000000000000007 R12: 0000000000000000
      [ 1092.685475] R13: 0000000000000001 R14: ffff929e3fb38100 R15: ffff929e39b4407a
      [ 1092.692614] FS:  0000000000000000(0000) GS:ffff929e5fa80000(0000) knlGS:0000000000000000
      [ 1092.700702] CS:  0010 DS: 0000 ES: 0000 CR0: 0000000080050033
      [ 1092.706447] CR2: 00000000000000e8 CR3: 000000031300a004 CR4: 00000000007606e0
      [ 1092.713579] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
      [ 1092.720712] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
      [ 1092.727843] PKRU: 55555554
      [ 1092.730556] Call Trace:
      [ 1092.733010]  <IRQ>
      [ 1092.735034]  tipc_sk_filter_rcv+0x7ca/0xb80 [tipc]
      [ 1092.739828]  ? __kmalloc_node_track_caller+0x1cb/0x290
      [ 1092.744974]  ? dev_hard_start_xmit+0xa5/0x210
      [ 1092.749332]  tipc_sk_rcv+0x389/0x640 [tipc]
      [ 1092.753519]  tipc_sk_mcast_rcv+0x23c/0x3a0 [tipc]
      [ 1092.758224]  tipc_rcv+0x57a/0xf20 [tipc]
      [ 1092.762154]  ? ktime_get_real_ts64+0x40/0xe0
      [ 1092.766432]  ? tpacket_rcv+0x50/0x9f0
      [ 1092.770098]  tipc_l2_rcv_msg+0x4a/0x70 [tipc]
      [ 1092.774452]  __netif_receive_skb_core+0xb62/0xbd0
      [ 1092.779164]  ? enqueue_entity+0xf6/0x630
      [ 1092.783084]  ? kmem_cache_alloc+0x158/0x1c0
      [ 1092.787272]  ? __build_skb+0x25/0xd0
      [ 1092.790849]  netif_receive_skb_internal+0x42/0xf0
      [ 1092.795557]  napi_gro_receive+0xba/0xe0
      [ 1092.799417]  mlx5e_handle_rx_cqe+0x83/0xd0 [mlx5_core]
      [ 1092.804564]  mlx5e_poll_rx_cq+0xd5/0x920 [mlx5_core]
      [ 1092.809536]  mlx5e_napi_poll+0xb2/0xce0 [mlx5_core]
      [ 1092.814415]  ? __wake_up_common_lock+0x89/0xc0
      [ 1092.818861]  net_rx_action+0x149/0x3b0
      [ 1092.822616]  __do_softirq+0xe3/0x30a
      [ 1092.826193]  irq_exit+0x100/0x110
      [ 1092.829512]  do_IRQ+0x85/0xd0
      [ 1092.832483]  common_interrupt+0xf/0xf
      [ 1092.836147]  </IRQ>
      [ 1092.838255] RIP: 0010:cpuidle_enter_state+0xb7/0x2a0
      [ 1092.843221] Code: e8 3e 79 a5 ff 80 7c 24 03 00 74 17 9c 58 0f 1f 44 00 00 f6 c4 02 0f 85 d7 01 00 00 31 ff e8 a0 6b ab ff fb 66 0f 1f 44 00 00 <48> b8 ff ff ff ff f3 01 00 00 4c 29 f3 ba ff ff ff 7f 48 39 c3 7f
      [ 1092.861967] RSP: 0018:ffffaa5ec6533e98 EFLAGS: 00000246 ORIG_RAX: ffffffffffffffdd
      [ 1092.869530] RAX: ffff929e5faa3100 RBX: 000000fe63dd2092 RCX: 000000000000001f
      [ 1092.876665] RDX: 000000fe63dd2092 RSI: 000000003a518aaa RDI: 0000000000000000
      [ 1092.883795] RBP: 0000000000000003 R08: 0000000000000004 R09: 0000000000022940
      [ 1092.890929] R10: 0000040cb0666b56 R11: ffff929e5faa20a8 R12: ffff929e5faade78
      [ 1092.898060] R13: ffffffffb59258f8 R14: 000000fe60f3228d R15: 0000000000000000
      [ 1092.905196]  ? cpuidle_enter_state+0x92/0x2a0
      [ 1092.909555]  do_idle+0x236/0x280
      [ 1092.912785]  cpu_startup_entry+0x6f/0x80
      [ 1092.916715]  start_secondary+0x1a7/0x200
      [ 1092.920642]  secondary_startup_64+0xb7/0xc0
      [...]
      
      The reason is that the skb list tipc_socket::mc_method.deferredq only
      is initialized for connectionless sockets, while nothing stops arriving
      multicast messages from being filtered by connection oriented sockets,
      with subsequent access to the said list.
      
      We fix this by initializing the list unconditionally at socket creation.
      This eliminates the crash, while the message still is dropped further
      down in tipc_sk_filter_rcv() as it should be.
      
      Reported-by: Li Shuang <shuali@redhat.com>
      Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
      Reviewed-by: Xin Long <lucien.xin@gmail.com>
      Signed-off-by: David S. Miller <davem@davemloft.net>
    trace_stack.c 7.37 KiB
    /*
     * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
     *
     */
    #include <linux/stacktrace.h>
    #include <linux/kallsyms.h>
    #include <linux/seq_file.h>
    #include <linux/spinlock.h>
    #include <linux/uaccess.h>
    #include <linux/debugfs.h>
    #include <linux/ftrace.h>
    #include <linux/module.h>
    #include <linux/sysctl.h>
    #include <linux/init.h>
    #include <linux/fs.h>
    #include "trace.h"
    
    #define STACK_TRACE_ENTRIES 500
    
    /*
     * Return addresses of the deepest call chain seen so far.
     * Unused slots hold ULONG_MAX, which also acts as the end sentinel.
     */
    static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
    	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
    /* Stack depth (bytes from the top of the stack) at each recorded frame. */
    static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
    
    /* Snapshot of the call chain that produced the maximum stack usage. */
    static struct stack_trace max_stack_trace = {
    	.max_entries		= STACK_TRACE_ENTRIES,
    	.entries		= stack_dump_trace,
    };
    
    /* Largest stack usage (bytes) observed so far; guarded by max_stack_lock. */
    static unsigned long max_stack_size;
    static raw_spinlock_t max_stack_lock =
    	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
    
    static int stack_trace_disabled __read_mostly;
    /* Per-cpu recursion guard used by stack_trace_call(). */
    static DEFINE_PER_CPU(int, trace_active);
    /* Serializes enable/disable via the sysctl handler. */
    static DEFINE_MUTEX(stack_sysctl_mutex);
    
    int stack_tracer_enabled;
    static int last_stack_tracer_enabled;
    
    /*
     * Measure the current stack usage and, if it is a new maximum, record
     * the call chain and the per-frame stack depths under max_stack_lock.
     * Called from the function tracer, so it must be cheap in the common
     * (no new maximum) case.
     */
    static inline void check_stack(void)
    {
    	unsigned long this_size, flags;
    	unsigned long *p, *top, *start;
    	int i;
    
    	/*
    	 * The address of a local is within the current stack page set;
    	 * its offset into THREAD_SIZE gives the remaining room, so the
    	 * used amount is THREAD_SIZE minus that offset.
    	 */
    	this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
    	this_size = THREAD_SIZE - this_size;
    
    	if (this_size <= max_stack_size)
    		return;
    
    	/* we do not handle interrupt stacks yet */
    	if (!object_is_on_stack(&this_size))
    		return;
    
    	local_irq_save(flags);
    	__raw_spin_lock(&max_stack_lock);
    
    	/* a race could have already updated it */
    	if (this_size <= max_stack_size)
    		goto out;
    
    	max_stack_size = this_size;
    
    	/* skip==3 drops the tracer-internal frames from the snapshot */
    	max_stack_trace.nr_entries	= 0;
    	max_stack_trace.skip		= 3;
    
    	save_stack_trace(&max_stack_trace);
    
    	/*
    	 * Now find where in the stack these are.
    	 */
    	i = 0;
    	start = &this_size;
    	top = (unsigned long *)
    		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
    
    	/*
    	 * Loop through all the entries. One of the entries may
    	 * for some reason be missed on the stack, so we may
    	 * have to account for them. If they are all there, this
    	 * loop will only happen once. This code only takes place
    	 * on a new max, so it is far from a fast path.
    	 */
    	while (i < max_stack_trace.nr_entries) {
    		int found = 0;
    
    		/* default: reuse the previous depth if the frame isn't found */
    		stack_dump_index[i] = this_size;
    		p = start;
    
    		/* scan stack words upward for this saved return address */
    		for (; p < top && i < max_stack_trace.nr_entries; p++) {
    			if (*p == stack_dump_trace[i]) {
    				this_size = stack_dump_index[i++] =
    					(top - p) * sizeof(unsigned long);
    				found = 1;
    				/* Start the search from here */
    				start = p + 1;
    			}
    		}
    
    		if (!found)
    			i++;
    	}
    
     out:
    	__raw_spin_unlock(&max_stack_lock);
    	local_irq_restore(flags);
    }
    
    /*
     * ftrace callback invoked on every traced function entry.
     * Uses a per-cpu counter as a recursion guard so that functions
     * called from check_stack() itself do not re-enter the tracer.
     */
    static void
    stack_trace_call(unsigned long ip, unsigned long parent_ip)
    {
    	int cpu, resched;
    
    	if (unlikely(!ftrace_enabled || stack_trace_disabled))
    		return;
    
    	/* disable preemption so the per-cpu counter stays on this cpu */
    	resched = ftrace_preempt_disable();
    
    	cpu = raw_smp_processor_id();
    	/* no atomic needed, we only modify this variable by this cpu */
    	if (per_cpu(trace_active, cpu)++ != 0)
    		goto out;
    
    	check_stack();
    
     out:
    	per_cpu(trace_active, cpu)--;
    	/* prevent recursion in schedule */
    	ftrace_preempt_enable(resched);
    }
    
    /* Hook registered with ftrace when the stack tracer is enabled. */
    static struct ftrace_ops trace_ops __read_mostly =
    {
    	.func = stack_trace_call,
    };
    
    /*
     * debugfs read handler for "stack_max_size": report the largest stack
     * usage recorded so far as a decimal string.
     *
     * filp->private_data points at max_stack_size (an unsigned long), so
     * the value must be formatted with %lu — the original %ld would print
     * large values as negative and mismatches the argument type.
     */
    static ssize_t
    stack_max_size_read(struct file *filp, char __user *ubuf,
    		    size_t count, loff_t *ppos)
    {
    	unsigned long *ptr = filp->private_data;
    	char buf[64];
    	int r;
    
    	r = snprintf(buf, sizeof(buf), "%lu\n", *ptr);
    	/* snprintf returns the would-be length; clamp if truncated */
    	if (r > sizeof(buf))
    		r = sizeof(buf);
    	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
    }
    
    /*
     * debugfs write handler for "stack_max_size": parse a decimal value
     * from userspace and store it under max_stack_lock.
     *
     * filp->private_data points at max_stack_size, which is an unsigned
     * long — declare the pointer accordingly (the original used a plain
     * long *, aliasing the object with a mismatched signedness).
     */
    static ssize_t
    stack_max_size_write(struct file *filp, const char __user *ubuf,
    		     size_t count, loff_t *ppos)
    {
    	unsigned long *ptr = filp->private_data;
    	unsigned long val, flags;
    	char buf[64];
    	int ret;
    
    	if (count >= sizeof(buf))
    		return -EINVAL;
    
    	if (copy_from_user(&buf, ubuf, count))
    		return -EFAULT;
    
    	buf[count] = 0;
    
    	ret = strict_strtoul(buf, 10, &val);
    	if (ret < 0)
    		return ret;
    
    	/* take the lock so readers of max_stack_size see a consistent value */
    	local_irq_save(flags);
    	__raw_spin_lock(&max_stack_lock);
    	*ptr = val;
    	__raw_spin_unlock(&max_stack_lock);
    	local_irq_restore(flags);
    
    	return count;
    }
    
    /* File operations for the "stack_max_size" debugfs entry. */
    static const struct file_operations stack_max_size_fops = {
    	.open		= tracing_open_generic,
    	.read		= stack_max_size_read,
    	.write		= stack_max_size_write,
    };
    
    /*
     * seq_file iterator step: advance to the next recorded stack entry.
     * The current index is stashed in m->private; returns NULL once the
     * recorded entries (or the ULONG_MAX sentinel) are exhausted.
     */
    static void *
    t_next(struct seq_file *m, void *v, loff_t *pos)
    {
    	long n;
    
    	(*pos)++;
    
    	n = (v == SEQ_START_TOKEN) ? 0 : *(long *)v + 1;
    
    	if (n >= max_stack_trace.nr_entries ||
    	    stack_dump_trace[n] == ULONG_MAX)
    		return NULL;
    
    	m->private = (void *)n;
    	return &m->private;
    }
    
    /*
     * seq_file start handler. Takes max_stack_lock (with IRQs off) so the
     * recorded trace cannot change while it is being printed; the matching
     * unlock happens in t_stop(). Position 0 yields SEQ_START_TOKEN so
     * t_show() can emit the header; otherwise the iterator is replayed
     * forward to *pos via t_next().
     */
    static void *t_start(struct seq_file *m, loff_t *pos)
    {
    	void *t = SEQ_START_TOKEN;
    	loff_t l = 0;
    
    	local_irq_disable();
    	__raw_spin_lock(&max_stack_lock);
    
    	if (*pos == 0)
    		return SEQ_START_TOKEN;
    
    	/* replay the iterator until we reach the requested position */
    	for (; t && l < *pos; t = t_next(m, t, &l))
    		;
    
    	return t;
    }
    
    /* seq_file stop handler: releases the lock taken in t_start(). */
    static void t_stop(struct seq_file *m, void *p)
    {
    	__raw_spin_unlock(&max_stack_lock);
    	local_irq_enable();
    }
    
    /*
     * Print one recorded return address on its own line, resolved to a
     * symbol name when kallsyms is available, raw pointer otherwise.
     */
    static int trace_lookup_stack(struct seq_file *m, long i)
    {
    	unsigned long addr = stack_dump_trace[i];
    
    #ifdef CONFIG_KALLSYMS
    	char str[KSYM_SYMBOL_LEN];
    
    	sprint_symbol(str, addr);
    	return seq_printf(m, "%s\n", str);
    #else
    	return seq_printf(m, "%p\n", (void *)addr);
    #endif
    }
    
    /*
     * seq_file show handler: print the header for SEQ_START_TOKEN, or one
     * "depth / size / location" row for the entry index carried in v.
     * The size of a frame is the depth delta to the next entry; the last
     * entry accounts for all remaining depth.
     */
    static int t_show(struct seq_file *m, void *v)
    {
    	long idx;
    	int frame_size;
    
    	if (v == SEQ_START_TOKEN) {
    		seq_printf(m, "        Depth   Size      Location"
    			   "    (%d entries)\n"
    			   "        -----   ----      --------\n",
    			   max_stack_trace.nr_entries);
    		return 0;
    	}
    
    	idx = *(long *)v;
    	if (idx >= max_stack_trace.nr_entries ||
    	    stack_dump_trace[idx] == ULONG_MAX)
    		return 0;
    
    	if (idx + 1 == max_stack_trace.nr_entries ||
    	    stack_dump_trace[idx + 1] == ULONG_MAX)
    		frame_size = stack_dump_index[idx];
    	else
    		frame_size = stack_dump_index[idx] - stack_dump_index[idx + 1];
    
    	seq_printf(m, "%3ld) %8d   %5d   ", idx, stack_dump_index[idx],
    		   frame_size);
    	trace_lookup_stack(m, idx);
    
    	return 0;
    }
    
    /* seq_file iterator for the "stack_trace" debugfs entry. */
    static const struct seq_operations stack_trace_seq_ops = {
    	.start		= t_start,
    	.next		= t_next,
    	.stop		= t_stop,
    	.show		= t_show,
    };
    
    /* Open handler for "stack_trace": attach the seq_file iterator. */
    static int stack_trace_open(struct inode *inode, struct file *file)
    {
    	return seq_open(file, &stack_trace_seq_ops);
    }
    
    /* File operations for the "stack_trace" debugfs entry. */
    static const struct file_operations stack_trace_fops = {
    	.open		= stack_trace_open,
    	.read		= seq_read,
    	.llseek		= seq_lseek,
    };
    
    /*
     * Sysctl handler for enabling/disabling the stack tracer. After the
     * integer value is read or written by proc_dointvec(), register or
     * unregister the ftrace hook whenever the enabled state changed.
     * Serialized by stack_sysctl_mutex.
     */
    int
    stack_trace_sysctl(struct ctl_table *table, int write,
    		   struct file *file, void __user *buffer, size_t *lenp,
    		   loff_t *ppos)
    {
    	int ret;
    
    	mutex_lock(&stack_sysctl_mutex);
    
    	ret = proc_dointvec(table, write, file, buffer, lenp, ppos);
    
    	if (!ret && write &&
    	    last_stack_tracer_enabled != stack_tracer_enabled) {
    		last_stack_tracer_enabled = stack_tracer_enabled;
    
    		if (stack_tracer_enabled)
    			register_ftrace_function(&trace_ops);
    		else
    			unregister_ftrace_function(&trace_ops);
    	}
    
    	mutex_unlock(&stack_sysctl_mutex);
    	return ret;
    }
    
    /* Boot-parameter handler: "stacktrace" on the command line turns the
     * tracer on from startup. */
    static __init int enable_stacktrace(char *str)
    {
    	last_stack_tracer_enabled = 1;
    	stack_tracer_enabled = 1;
    	return 1;
    }
    __setup("stacktrace", enable_stacktrace);
    
    /*
     * Create the debugfs control files ("stack_max_size" and
     * "stack_trace") and register the ftrace hook if the tracer was
     * enabled on the kernel command line.
     */
    static __init int stack_trace_init(void)
    {
    	struct dentry *d_tracer = tracing_init_dentry();
    
    	if (!debugfs_create_file("stack_max_size", 0644, d_tracer,
    				 &max_stack_size, &stack_max_size_fops))
    		pr_warning("Could not create debugfs 'stack_max_size' entry\n");
    
    	if (!debugfs_create_file("stack_trace", 0444, d_tracer,
    				 NULL, &stack_trace_fops))
    		pr_warning("Could not create debugfs 'stack_trace' entry\n");
    
    	if (stack_tracer_enabled)
    		register_ftrace_function(&trace_ops);
    
    	return 0;
    }
    
    device_initcall(stack_trace_init);