/*
 * Char device for device raw access
 *
 * Copyright (C) 2005-2007  Kristian Hoegsberg <krh@bitplanet.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/bug.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-cdev.h>
#include <linux/idr.h>
#include <linux/irqflags.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "core.h"

/*
 * ABI version history is documented in linux/firewire-cdev.h.
 */
#define FW_CDEV_KERNEL_VERSION			5
#define FW_CDEV_VERSION_EVENT_REQUEST2		4
#define FW_CDEV_VERSION_ALLOCATE_REGION_END	4
#define FW_CDEV_VERSION_AUTO_FLUSH_ISO_OVERFLOW	5

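/* Per-open-file state of a /dev/fw* character device file. */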
struct client {
	u32 version;
	struct fw_device *device;

	spinlock_t lock;
	bool in_shutdown;
	struct idr resource_idr;
	struct list_head event_list;
	wait_queue_head_t wait;
	wait_queue_head_t tx_flush_wait;
	u64 bus_reset_closure;

	struct fw_iso_context *iso_context;
	u64 iso_closure;
	struct fw_iso_buffer buffer;
	unsigned long vm_start;
	bool buffer_is_mapped;

	struct list_head phy_receiver_link;
	u64 phy_receiver_closure;

	struct list_head link;
	struct kref kref;
};

static inline void client_get(struct client *client)
{
	kref_get(&client->kref);
}

static void client_release(struct kref *kref)
{
	struct client *client = container_of(kref, struct client, kref);

	fw_device_put(client->device);
	kfree(client);
}

static void client_put(struct client *client)
{
	kref_put(&client->kref, client_release);
}

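/*
 * Every kernel-side object created on behalf of a client (address handler,
 * pending transaction, descriptor, iso resource) is wrapped in a
 * client_resource, stored in client->resource_idr, and referenced from
 * userspace by the handle returned at allocation time.  The release
 * callback identifies the resource type and frees it.
 */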
struct client_resource;
typedef void (*client_resource_release_fn_t)(struct client *,
					     struct client_resource *);
struct client_resource {
	client_resource_release_fn_t release;
	int handle;
};

struct address_handler_resource {
	struct client_resource resource;
	struct fw_address_handler handler;
	__u64 closure;
	struct client *client;
};

struct outbound_transaction_resource {
	struct client_resource resource;
	struct fw_transaction transaction;
};

struct inbound_transaction_resource {
	struct client_resource resource;
	struct fw_card *card;
	struct fw_request *request;
	void *data;
	size_t length;
};

struct descriptor_resource {
	struct client_resource resource;
	struct fw_descriptor descriptor;
	u32 data[0];
};

struct iso_resource {
	struct client_resource resource;
	struct client *client;
	/* Schedule work and access todo only with client->lock held. */
	struct delayed_work work;
	enum {ISO_RES_ALLOC, ISO_RES_REALLOC, ISO_RES_DEALLOC,
	      ISO_RES_ALLOC_ONCE, ISO_RES_DEALLOC_ONCE,} todo;
	int generation;
	u64 channels;
	s32 bandwidth;
	struct iso_resource_event *e_alloc, *e_dealloc;
};

static void release_iso_resource(struct client *, struct client_resource *);

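/*
 * schedule_iso_resource() takes a client reference before queueing the work;
 * if the work was already pending, queue_delayed_work() returns false and
 * the reference is dropped again immediately.
 */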
static void schedule_iso_resource(struct iso_resource *r, unsigned long delay)
{
	client_get(r->client);
	if (!queue_delayed_work(fw_workqueue, &r->work, delay))
		client_put(r->client);
}

static void schedule_if_iso_resource(struct client_resource *resource)
{
	if (resource->release == release_iso_resource)
		schedule_iso_resource(container_of(resource,
					struct iso_resource, resource), 0);
}

/*
 * dequeue_event() just kfree()'s the event, so the event has to be
 * the first field in a struct XYZ_event.
 */
struct event {
	struct { void *data; size_t size; } v[2];
	struct list_head link;
};

struct bus_reset_event {
	struct event event;
	struct fw_cdev_event_bus_reset reset;
};

struct outbound_transaction_event {
	struct event event;
	struct client *client;
	struct outbound_transaction_resource r;
	struct fw_cdev_event_response response;
};

struct inbound_transaction_event {
	struct event event;
	union {
		struct fw_cdev_event_request request;
		struct fw_cdev_event_request2 request2;
	} req;
};

struct iso_interrupt_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt interrupt;
};

struct iso_interrupt_mc_event {
	struct event event;
	struct fw_cdev_event_iso_interrupt_mc interrupt;
};

struct iso_resource_event {
	struct event event;
	struct fw_cdev_event_iso_resource iso_resource;
};

struct outbound_phy_packet_event {
	struct event event;
	struct client *client;
	struct fw_packet p;
	struct fw_cdev_event_phy_packet phy_packet;
};

struct inbound_phy_packet_event {
	struct event event;
	struct fw_cdev_event_phy_packet phy_packet;
};

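/*
 * The cdev ABI carries user pointers in 64-bit fields.  For a 32-bit process
 * on a 64-bit kernel they have to be converted with the compat helpers.
 */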
#ifdef CONFIG_COMPAT
static void __user *u64_to_uptr(u64 value)
{
	if (in_compat_syscall())
		return compat_ptr(value);
	else
		return (void __user *)(unsigned long)value;
}

static u64 uptr_to_u64(void __user *ptr)
{
	if (in_compat_syscall())
		return ptr_to_compat(ptr);
	else
		return (u64)(unsigned long)ptr;
}
#else
static inline void __user *u64_to_uptr(u64 value)
{
	return (void __user *)(unsigned long)value;
}

static inline u64 uptr_to_u64(void __user *ptr)
{
	return (u64)(unsigned long)ptr;
}
#endif /* CONFIG_COMPAT */

static int fw_device_op_open(struct inode *inode, struct file *file)
{
	struct fw_device *device;
	struct client *client;

	device = fw_device_get_by_devt(inode->i_rdev);
	if (device == NULL)
		return -ENODEV;

	if (fw_device_is_shutdown(device)) {
		fw_device_put(device);
		return -ENODEV;
	}

	client = kzalloc(sizeof(*client), GFP_KERNEL);
	if (client == NULL) {
		fw_device_put(device);
		return -ENOMEM;
	}

	client->device = device;
	spin_lock_init(&client->lock);
	idr_init(&client->resource_idr);
	INIT_LIST_HEAD(&client->event_list);
	init_waitqueue_head(&client->wait);
	init_waitqueue_head(&client->tx_flush_wait);
	INIT_LIST_HEAD(&client->phy_receiver_link);
	INIT_LIST_HEAD(&client->link);
	kref_init(&client->kref);

	file->private_data = client;

	return nonseekable_open(inode, file);
}

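/*
 * An event is queued as up to two segments (typically a fixed-size header
 * plus a variable-length payload) so that both can be copied to the reader
 * without an intermediate buffer.  Events arriving after shutdown has begun
 * are freed immediately.
 */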
static void queue_event(struct client *client, struct event *event,
			void *data0, size_t size0, void *data1, size_t size1)
{
	unsigned long flags;

	event->v[0].data = data0;
	event->v[0].size = size0;
	event->v[1].data = data1;
	event->v[1].size = size1;

	spin_lock_irqsave(&client->lock, flags);
	if (client->in_shutdown)
		kfree(event);
	else
		list_add_tail(&event->link, &client->event_list);
	spin_unlock_irqrestore(&client->lock, flags);

	wake_up_interruptible(&client->wait);
}

static int dequeue_event(struct client *client,
			 char __user *buffer, size_t count)
{
	struct event *event;
	size_t size, total;
	int i, ret;

	ret = wait_event_interruptible(client->wait,
			!list_empty(&client->event_list) ||
			fw_device_is_shutdown(client->device));
	if (ret < 0)
		return ret;

	if (list_empty(&client->event_list) &&
		       fw_device_is_shutdown(client->device))
		return -ENODEV;

	spin_lock_irq(&client->lock);
	event = list_first_entry(&client->event_list, struct event, link);
	list_del(&event->link);
	spin_unlock_irq(&client->lock);

	total = 0;
	for (i = 0; i < ARRAY_SIZE(event->v) && total < count; i++) {
		size = min(event->v[i].size, count - total);
		if (copy_to_user(buffer + total, event->v[i].data, size)) {
			ret = -EFAULT;
			goto out;
		}
		total += size;
	}
	ret = total;

 out:
	kfree(event);

	return ret;
}

static ssize_t fw_device_op_read(struct file *file, char __user *buffer,
				 size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return dequeue_event(client, buffer, count);
}

static void fill_bus_reset_event(struct fw_cdev_event_bus_reset *event,
				 struct client *client)
{
	struct fw_card *card = client->device->card;

	spin_lock_irq(&card->lock);

	event->closure	     = client->bus_reset_closure;
	event->type          = FW_CDEV_EVENT_BUS_RESET;
	event->generation    = client->device->generation;
	event->node_id       = client->device->node_id;
	event->local_node_id = card->local_node->node_id;
	event->bm_node_id    = card->bm_node_id;
	event->irm_node_id   = card->irm_node->node_id;
	event->root_node_id  = card->root_node->node_id;

	spin_unlock_irq(&card->lock);
}

static void for_each_client(struct fw_device *device,
			    void (*callback)(struct client *client))
{
	struct client *c;

	mutex_lock(&device->client_list_mutex);
	list_for_each_entry(c, &device->client_list, link)
		callback(c);
	mutex_unlock(&device->client_list_mutex);
}

static int schedule_reallocations(int id, void *p, void *data)
{
	schedule_if_iso_resource(p);

	return 0;
}

static void queue_bus_reset_event(struct client *client)
{
	struct bus_reset_event *e;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e == NULL)
		return;

	fill_bus_reset_event(&e->reset, client);

	queue_event(client, &e->event,
		    &e->reset, sizeof(e->reset), NULL, 0);

	spin_lock_irq(&client->lock);
	idr_for_each(&client->resource_idr, schedule_reallocations, client);
	spin_unlock_irq(&client->lock);
}

void fw_device_cdev_update(struct fw_device *device)
{
	for_each_client(device, queue_bus_reset_event);
}

static void wake_up_client(struct client *client)
{
	wake_up_interruptible(&client->wait);
}

void fw_device_cdev_remove(struct fw_device *device)
{
	for_each_client(device, wake_up_client);
}

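/* In-kernel copy of an ioctl argument; sized for the largest of them. */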
union ioctl_arg {
	struct fw_cdev_get_info			get_info;
	struct fw_cdev_send_request		send_request;
	struct fw_cdev_allocate			allocate;
	struct fw_cdev_deallocate		deallocate;
	struct fw_cdev_send_response		send_response;
	struct fw_cdev_initiate_bus_reset	initiate_bus_reset;
	struct fw_cdev_add_descriptor		add_descriptor;
	struct fw_cdev_remove_descriptor	remove_descriptor;
	struct fw_cdev_create_iso_context	create_iso_context;
	struct fw_cdev_queue_iso		queue_iso;
	struct fw_cdev_start_iso		start_iso;
	struct fw_cdev_stop_iso			stop_iso;
	struct fw_cdev_get_cycle_timer		get_cycle_timer;
	struct fw_cdev_allocate_iso_resource	allocate_iso_resource;
	struct fw_cdev_send_stream_packet	send_stream_packet;
	struct fw_cdev_get_cycle_timer2		get_cycle_timer2;
	struct fw_cdev_send_phy_packet		send_phy_packet;
	struct fw_cdev_receive_phy_packets	receive_phy_packets;
	struct fw_cdev_set_iso_channels		set_iso_channels;
	struct fw_cdev_flush_iso		flush_iso;
};

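/*
 * Typically the first ioctl a client issues: it records the client's ABI
 * version, reports the kernel's, optionally copies out the config ROM and
 * the current bus reset state, and adds the client to the device's
 * client list.
 */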
static int ioctl_get_info(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_get_info *a = &arg->get_info;
	struct fw_cdev_event_bus_reset bus_reset;
	unsigned long ret = 0;

	client->version = a->version;
	a->version = FW_CDEV_KERNEL_VERSION;
	a->card = client->device->card->index;

	down_read(&fw_device_rwsem);

	if (a->rom != 0) {
		size_t want = a->rom_length;
		size_t have = client->device->config_rom_length * 4;

		ret = copy_to_user(u64_to_uptr(a->rom),
				   client->device->config_rom, min(want, have));
	}
	a->rom_length = client->device->config_rom_length * 4;

	up_read(&fw_device_rwsem);

	if (ret != 0)
		return -EFAULT;

	mutex_lock(&client->device->client_list_mutex);

	client->bus_reset_closure = a->bus_reset_closure;
	if (a->bus_reset != 0) {
		fill_bus_reset_event(&bus_reset, client);
		/* unaligned size of bus_reset is 36 bytes */
		ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36);
	}
	if (ret == 0 && list_empty(&client->link))
		list_add_tail(&client->link, &client->device->client_list);

	mutex_unlock(&client->device->client_list_mutex);

	return ret ? -EFAULT : 0;
}

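/*
 * The idr is preloaded outside the spinlock when the caller's gfp mask
 * allows blocking; the allocation under client->lock then uses GFP_NOWAIT.
 */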
static int add_client_resource(struct client *client,
			       struct client_resource *resource, gfp_t gfp_mask)
{
	bool preload = gfpflags_allow_blocking(gfp_mask);
	unsigned long flags;
	int ret;

	if (preload)
		idr_preload(gfp_mask);
	spin_lock_irqsave(&client->lock, flags);

	if (client->in_shutdown)
		ret = -ECANCELED;
	else
		ret = idr_alloc(&client->resource_idr, resource, 0, 0,
				GFP_NOWAIT);
	if (ret >= 0) {
		resource->handle = ret;
		client_get(client);
		schedule_if_iso_resource(resource);
	}

	spin_unlock_irqrestore(&client->lock, flags);
	if (preload)
		idr_preload_end();

	return ret < 0 ? ret : 0;
}

static int release_client_resource(struct client *client, u32 handle,
				   client_resource_release_fn_t release,
				   struct client_resource **return_resource)
{
	struct client_resource *resource;

	spin_lock_irq(&client->lock);
	if (client->in_shutdown)
		resource = NULL;
	else
		resource = idr_find(&client->resource_idr, handle);
	if (resource && resource->release == release)
		idr_remove(&client->resource_idr, handle);
	spin_unlock_irq(&client->lock);

	if (!(resource && resource->release == release))
		return -EINVAL;

	if (return_resource)
		*return_resource = resource;
	else
		resource->release(client, resource);

	client_put(client);

	return 0;
}

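/*
 * Outbound transactions are removed from the idr and their events queued by
 * complete_transaction(); there is nothing left to release here.  The
 * callback mainly serves to identify the resource type.
 */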
static void release_transaction(struct client *client,
				struct client_resource *resource)
{
}

static void complete_transaction(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct outbound_transaction_event *e = data;
	struct fw_cdev_event_response *rsp = &e->response;
	struct client *client = e->client;
	unsigned long flags;

	if (length < rsp->length)
		rsp->length = length;
	if (rcode == RCODE_COMPLETE)
		memcpy(rsp->data, payload, rsp->length);

	spin_lock_irqsave(&client->lock, flags);
	idr_remove(&client->resource_idr, e->r.resource.handle);
	if (client->in_shutdown)
		wake_up(&client->tx_flush_wait);
	spin_unlock_irqrestore(&client->lock, flags);

	rsp->type = FW_CDEV_EVENT_RESPONSE;
	rsp->rcode = rcode;

	/*
	 * In the case that sizeof(*rsp) doesn't align with the position of the
	 * data, and the read is short, preserve an extra copy of the data
	 * to stay compatible with a pre-2.6.27 bug.  Since the bug is harmless
	 * for short reads and some apps depended on it, this is both safe
	 * and prudent for compatibility.
	 */
	if (rsp->length <= sizeof(*rsp) - offsetof(typeof(*rsp), data))
		queue_event(client, &e->event, rsp, sizeof(*rsp),
			    rsp->data, rsp->length);
	else
		queue_event(client, &e->event, rsp, sizeof(*rsp) + rsp->length,
			    NULL, 0);

	/* Drop the idr's reference */
	client_put(client);
}

static int init_request(struct client *client,
			struct fw_cdev_send_request *request,
			int destination_id, int speed)
{
	struct outbound_transaction_event *e;
	int ret;

	if (request->tcode != TCODE_STREAM_DATA &&
	    (request->length > 4096 || request->length > 512 << speed))
		return -EIO;

	if (request->tcode == TCODE_WRITE_QUADLET_REQUEST &&
	    request->length < 4)
		return -EINVAL;

	e = kmalloc(sizeof(*e) + request->length, GFP_KERNEL);
	if (e == NULL)
		return -ENOMEM;

	e->client = client;
	e->response.length = request->length;
	e->response.closure = request->closure;

	if (request->data &&
	    copy_from_user(e->response.data,
			   u64_to_uptr(request->data), request->length)) {
		ret = -EFAULT;
		goto failed;
	}

	e->r.resource.release = release_transaction;
	ret = add_client_resource(client, &e->r.resource, GFP_KERNEL);
	if (ret < 0)
		goto failed;

	fw_send_request(client->device->card, &e->r.transaction,
			request->tcode, destination_id, request->generation,
			speed, request->offset, e->response.data,
			request->length, complete_transaction, e);
	return 0;

 failed:
	kfree(e);

	return ret;
}

static int ioctl_send_request(struct client *client, union ioctl_arg *arg)
{
	switch (arg->send_request.tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_MASK_SWAP:
	case TCODE_LOCK_COMPARE_SWAP:
	case TCODE_LOCK_FETCH_ADD:
	case TCODE_LOCK_LITTLE_ADD:
	case TCODE_LOCK_BOUNDED_ADD:
	case TCODE_LOCK_WRAP_ADD:
	case TCODE_LOCK_VENDOR_DEPENDENT:
		break;
	default:
		return -EINVAL;
	}

	return init_request(client, &arg->send_request, client->device->node_id,
			    client->device->max_speed);
}

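/*
 * FCP requests are delivered with a NULL fw_request: the core sends the
 * response itself, so userspace only sees the payload and has no response
 * of its own to send.
 */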
static inline bool is_fcp_request(struct fw_request *request)
{
	return request == NULL;
}

static void release_request(struct client *client,
			    struct client_resource *resource)
{
	struct inbound_transaction_resource *r = container_of(resource,
			struct inbound_transaction_resource, resource);

	if (is_fcp_request(r->request))
		kfree(r->data);
	else
		fw_send_response(r->card, r->request, RCODE_CONFLICT_ERROR);

	fw_card_put(r->card);
	kfree(r);
}

static void handle_request(struct fw_card *card, struct fw_request *request,
			   int tcode, int destination, int source,
			   int generation, unsigned long long offset,
			   void *payload, size_t length, void *callback_data)
{
	struct address_handler_resource *handler = callback_data;
	struct inbound_transaction_resource *r;
	struct inbound_transaction_event *e;
	size_t event_size0;
	void *fcp_frame = NULL;
	int ret;

	/* card may be different from handler->client->device->card */
	fw_card_get(card);

	r = kmalloc(sizeof(*r), GFP_ATOMIC);
	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (r == NULL || e == NULL)
		goto failed;

	r->card    = card;
	r->request = request;
	r->data    = payload;
	r->length  = length;

	if (is_fcp_request(request)) {
		/*
		 * FIXME: Let core-transaction.c manage a
		 * single reference-counted copy?
		 */
		fcp_frame = kmemdup(payload, length, GFP_ATOMIC);
		if (fcp_frame == NULL)
			goto failed;

		r->data = fcp_frame;
	}

	r->resource.release = release_request;
	ret = add_client_resource(handler->client, &r->resource, GFP_ATOMIC);
	if (ret < 0)
		goto failed;

	if (handler->client->version < FW_CDEV_VERSION_EVENT_REQUEST2) {
		struct fw_cdev_event_request *req = &e->req.request;

		if (tcode & 0x10)
			tcode = TCODE_LOCK_REQUEST;

		req->type	= FW_CDEV_EVENT_REQUEST;
		req->tcode	= tcode;
		req->offset	= offset;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	} else {
		struct fw_cdev_event_request2 *req = &e->req.request2;

		req->type	= FW_CDEV_EVENT_REQUEST2;
		req->tcode	= tcode;
		req->offset	= offset;
		req->source_node_id = source;
		req->destination_node_id = destination;
		req->card	= card->index;
		req->generation	= generation;
		req->length	= length;
		req->handle	= r->resource.handle;
		req->closure	= handler->closure;
		event_size0	= sizeof(*req);
	}

	queue_event(handler->client, &e->event,
		    &e->req, event_size0, r->data, length);
	return;

 failed:
	kfree(r);
	kfree(e);
	kfree(fcp_frame);

	if (!is_fcp_request(request))
		fw_send_response(card, request, RCODE_CONFLICT_ERROR);

	fw_card_put(card);
}

static void release_address_handler(struct client *client,
				    struct client_resource *resource)
{
	struct address_handler_resource *r =
	    container_of(resource, struct address_handler_resource, resource);

	fw_core_remove_address_handler(&r->handler);
	kfree(r);
}

static int ioctl_allocate(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_allocate *a = &arg->allocate;
	struct address_handler_resource *r;
	struct fw_address_region region;
	int ret;

	r = kmalloc(sizeof(*r), GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	region.start = a->offset;
	if (client->version < FW_CDEV_VERSION_ALLOCATE_REGION_END)
		region.end = a->offset + a->length;
	else
		region.end = a->region_end;

	r->handler.length           = a->length;
	r->handler.address_callback = handle_request;
	r->handler.callback_data    = r;
	r->closure   = a->closure;
	r->client    = client;

	ret = fw_core_add_address_handler(&r->handler, &region);
	if (ret < 0) {
		kfree(r);
		return ret;
	}
	a->offset = r->handler.offset;

	r->resource.release = release_address_handler;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		release_address_handler(client, &r->resource);
		return ret;
	}
	a->handle = r->resource.handle;

	return 0;
}

static int ioctl_deallocate(struct client *client, union ioctl_arg *arg)
{
	return release_client_resource(client, arg->deallocate.handle,
				       release_address_handler, NULL);
}

static int ioctl_send_response(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_send_response *a = &arg->send_response;
	struct client_resource *resource;
	struct inbound_transaction_resource *r;
	int ret = 0;

	if (release_client_resource(client, a->handle,
				    release_request, &resource) < 0)
		return -EINVAL;

	r = container_of(resource, struct inbound_transaction_resource,
			 resource);
	if (is_fcp_request(r->request))
		goto out;

	if (a->length != fw_get_response_length(r->request)) {
		ret = -EINVAL;
		kfree(r->request);
		goto out;
	}
	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length)) {
		ret = -EFAULT;
		kfree(r->request);
		goto out;
	}
	fw_send_response(r->card, r->request, a->rcode);
 out:
	fw_card_put(r->card);
	kfree(r);

	return ret;
}

static int ioctl_initiate_bus_reset(struct client *client, union ioctl_arg *arg)
{
	fw_schedule_bus_reset(client->device->card, true,
			arg->initiate_bus_reset.type == FW_CDEV_SHORT_RESET);
	return 0;
}

static void release_descriptor(struct client *client,
			       struct client_resource *resource)
{
	struct descriptor_resource *r =
		container_of(resource, struct descriptor_resource, resource);

	fw_core_remove_descriptor(&r->descriptor);
	kfree(r);
}

static int ioctl_add_descriptor(struct client *client, union ioctl_arg *arg)
{
	struct fw_cdev_add_descriptor *a = &arg->add_descriptor;
	struct descriptor_resource *r;
	int ret;

	/* Access policy: Allow this ioctl only on local nodes' device files. */
	if (!client->device->is_local)
		return -ENOSYS;

	if (a->length > 256)
		return -EINVAL;

	r = kmalloc(sizeof(*r) + a->length * 4, GFP_KERNEL);
	if (r == NULL)
		return -ENOMEM;

	if (copy_from_user(r->data, u64_to_uptr(a->data), a->length * 4)) {
		ret = -EFAULT;
		goto failed;
	}

	r->descriptor.length    = a->length;
	r->descriptor.immediate = a->immediate;
	r->descriptor.key       = a->key;
	r->descriptor.data      = r->data;

	ret = fw_core_add_descriptor(&r->descriptor);
	if (ret < 0)
		goto failed;

	r->resource.release = release_descriptor;
	ret = add_client_resource(client, &r->resource, GFP_KERNEL);
	if (ret < 0) {
		fw_core_remove_descriptor(&r->descriptor);
		goto failed;
	}
	a->handle = r->resource.handle;

	return 0;
 failed:
	kfree(r);

	return ret;
}
