[prev in list] [next in list] [prev in thread] [next in thread]
List: packet-writing
Subject: Re: [packet-writing] Where to get that patch?
From: Cory <ssaver () crosswinds ! net>
Date: 2001-03-24 5:05:24
[Download RAW message or body]
Johnathan Hicks wrote:
> So where can I get that patch? If I knew of a list archive I'd search
> it, but I don't.
>
> -John
Here is the 0.0.2f patch release. It's in two files, so here's how you're gonna need to apply them:
In /usr/src/linux, use the first patch like this:
patch -p0 < packet-0.0.2f.diff
Once that's merged, cd to drivers/block and:
patch -p0 < packet-0.0.2f-update.diff
Then, you'll need to edit include/linux/pktcdvd.h and add the following:
...
...
struct pktcdvd_device
{
kdev_t dev;
struct block_device *bdev; // Add this
kdev_t pkt_dev;
...
...
}
That should allow you to compile it into your kernel.
Good luck.
Cory (ssaver@crosswinds.net)
["packet-0.0.2f.diff" (text/plain)]
diff -ruN linux/Documentation/Configure.help \
linux-packet/Documentation/Configure.help
--- linux/Documentation/Configure.help Wed Mar 21 03:07:03 2001
+++ linux-packet/Documentation/Configure.help Tue Mar 13 11:33:06 2001
@@ -511,6 +511,31 @@
say M here and read Documentation/modules.txt. The module will be
called ide-cd.o.
+
+Packet writing on CD/DVD media (EXPERIMENTAL)
+CONFIG_CDROM_PKTCDVD
+ If you have a CDROM drive that supports packet writing, say Y to
+ include preliminary support. It should work with any MMC/Mt Fuji
+ compliant ATAPI or SCSI drive, which is just about any newer CD
+ writer.
+
+ Currently only writing to CD-RW discs is possible.
+
+ If you want to compile the driver as a module ( = code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read Documentation/modules.txt. The module will be
+ called packet.o.
+
+Free buffers
+CONFIG_CDROM_PKTCDVD_BUFFERS
+ This controls the amount of free buffers that are allocated for
+ data gathering. More buffers speed up big writes at the cost of
+ latency and a bigger memory requirement (2KB per buffer).
+
+ This option has no effect at all if the CD-RW is used with other
+ file systems (or without a file system).
+
+
Include IDE/ATAPI TAPE support
CONFIG_BLK_DEV_IDETAPE
If you have an IDE tape drive using the ATAPI protocol, say Y.
diff -ruN linux/arch/sparc64/kernel/ioctl32.c \
linux-packet/arch/sparc64/kernel/ioctl32.c
--- linux/arch/sparc64/kernel/ioctl32.c Wed Mar 21 03:07:05 2001
+++ linux-packet/arch/sparc64/kernel/ioctl32.c Tue Mar 13 11:33:06 2001
@@ -88,6 +88,7 @@
#include <linux/atm_tcp.h>
#include <linux/sonet.h>
#include <linux/atm_suni.h>
+#include <linux/pktcdvd.h>
/* Use this to get at 32-bit user passed pointers.
See sys_sparc32.c for description about these. */
@@ -716,6 +717,37 @@
return ret;
}
+struct packet_stats32 {
+ u32 bh_s;
+ u32 bh_e;
+ u32 bh_w;
+ u32 bh_r;
+};
+
+static inline int pkt_getstats(unsigned int fd, unsigned int cmd, unsigned long arg)
+{
+ struct packet_stats p;
+ struct packet_stats32 p32;
+ mm_segment_t old_fs = get_fs();
+ int ret;
+
+ ret = copy_from_user (&p32, (struct packet_stats32 *)arg, sizeof(struct packet_stats32));
+ if (ret)
+ return -EFAULT;
+#define P(x) (p.x = (unsigned long)p32.x)
+ P(bh_s);
+ P(bh_e);
+ P(bh_w);
+ P(bh_r);
+#undef P
+
+ set_fs (KERNEL_DS);
+ ret = sys_ioctl (fd, cmd, (long)&p);
+ set_fs (old_fs);
+
+ return ret;
+}
+
struct hd_geometry32 {
unsigned char heads;
unsigned char sectors;
@@ -3611,6 +3643,12 @@
COMPATIBLE_IOCTL(WIOCSTART)
COMPATIBLE_IOCTL(WIOCSTOP)
COMPATIBLE_IOCTL(WIOCGSTAT)
+/* Big X, CDRW Packet Driver */
+#if defined(CONFIG_CDROM_PKTCDVD)
+COMPATIBLE_IOCTL(PACKET_SETUP_DEV)
+COMPATIBLE_IOCTL(PACKET_TEARDOWN_DEV)
+HANDLE_IOCTL(PACKET_GET_STATS, pkt_getstats)
+#endif /* CONFIG_CDROM_PKTCDVD */
/* And these ioctls need translation */
HANDLE_IOCTL(SIOCGIFNAME, dev_ifname32)
HANDLE_IOCTL(SIOCGIFCONF, dev_ifconf)
diff -ruN linux/drivers/block/Config.in linux-packet/drivers/block/Config.in
--- linux/drivers/block/Config.in Sat Feb 3 12:13:19 2001
+++ linux-packet/drivers/block/Config.in Tue Mar 13 11:33:06 2001
@@ -37,6 +37,11 @@
dep_tristate 'Compaq Smart Array 5xxx support' CONFIG_BLK_CPQ_CISS_DA $CONFIG_PCI
dep_tristate 'Mylex DAC960/DAC1100 PCI RAID Controller support' \
CONFIG_BLK_DEV_DAC960 $CONFIG_PCI
+tristate 'Packet writing on CD/DVD media' CONFIG_CDROM_PKTCDVD
+if [ "$CONFIG_CDROM_PKTCDVD" != "n" ]; then
+ int ' Free buffers for data gathering' CONFIG_CDROM_PKTCDVD_BUFFERS 256
+fi
+
tristate 'Loopback device support' CONFIG_BLK_DEV_LOOP
dep_tristate 'Network block device support' CONFIG_BLK_DEV_NBD $CONFIG_NET
diff -ruN linux/drivers/block/Makefile linux-packet/drivers/block/Makefile
--- linux/drivers/block/Makefile Fri Dec 29 14:07:21 2000
+++ linux-packet/drivers/block/Makefile Tue Mar 13 11:33:06 2001
@@ -10,7 +10,7 @@
O_TARGET := block.o
-export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o
+export-objs := ll_rw_blk.o blkpg.o loop.o DAC960.o elevator.o
obj-y := ll_rw_blk.o blkpg.o genhd.o elevator.o
@@ -31,6 +31,8 @@
obj-$(CONFIG_BLK_DEV_DAC960) += DAC960.o
obj-$(CONFIG_BLK_DEV_NBD) += nbd.o
+
+obj-$(CONFIG_CDROM_PKTCDVD) += pktcdvd.o
subdir-$(CONFIG_PARIDE) += paride
diff -ruN linux/drivers/block/elevator.c linux-packet/drivers/block/elevator.c
--- linux/drivers/block/elevator.c Thu Feb 15 16:58:34 2001
+++ linux-packet/drivers/block/elevator.c Tue Mar 13 11:33:06 2001
@@ -220,3 +220,6 @@
*elevator = type;
elevator->queue_ID = queue_ID++;
}
+
+EXPORT_SYMBOL(elevator_init);
+EXPORT_SYMBOL(bh_rq_in_between);
diff -ruN linux/drivers/block/ll_rw_blk.c linux-packet/drivers/block/ll_rw_blk.c
--- linux/drivers/block/ll_rw_blk.c Wed Mar 21 03:07:05 2001
+++ linux-packet/drivers/block/ll_rw_blk.c Wed Mar 21 03:07:47 2001
@@ -667,6 +667,7 @@
req->bhtail = next->bhtail;
req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors;
list_del(&next->queue);
+ req->end_io = next->end_io;
blkdev_release_request(next);
}
@@ -707,6 +708,7 @@
count = bh->b_size >> 9;
sector = bh->b_rsector;
+ bh->b_queue = q;
rw_ahead = 0; /* normal case; gets changed below for READA */
switch (rw) {
@@ -1144,8 +1146,11 @@
nsect = bh->b_size >> 9;
blk_finished_io(nsect);
req->bh = bh->b_reqnext;
+ if (req->bh && (bh->b_rsector + (bh->b_size >> 9)) != req->bh->b_rsector)
+ printk("%s: %lu is followed by %lu\n", name, bh->b_rsector, req->bh->b_rsector);
bh->b_reqnext = NULL;
bh->b_end_io(bh, uptodate);
+ bh->b_queue = NULL;
if ((bh = req->bh) != NULL) {
req->hard_sector += nsect;
req->hard_nr_sectors -= nsect;
@@ -1168,6 +1173,8 @@
{
if (req->sem != NULL)
up(req->sem);
+ if (req->end_io)
+ req->end_io(req);
blkdev_release_request(req);
}
diff -ruN linux/drivers/block/pktcdvd.c linux-packet/drivers/block/pktcdvd.c
--- linux/drivers/block/pktcdvd.c Wed Dec 31 16:00:00 1969
+++ linux-packet/drivers/block/pktcdvd.c Wed Mar 21 03:15:58 2001
@@ -0,0 +1,2164 @@
+/*
+ * Copyright (C) 2000 Jens Axboe <axboe@suse.de>
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License. See linux/COPYING for more information.
+ *
+ * Packet writing layer for ATAPI and SCSI CD-R, CD-RW, DVD-R, and
+ * DVD-RW devices (aka an exercise in block layer masturbation)
+ *
+ *
+ * TODO: (circa order of when I will fix it)
+ * - Only able to write on CD-RW media right now.
+ * - check host application code on media and set it in write page
+ * - Generic interface for UDF to submit large packets for variable length
+ * packet writing (kiovec of dirty pages)
+ * - (in correlation with above) interface for UDF <-> packet to negotiate
+ * a new location when a write fails.
+ * - handle OPC, especially for -RW media
+ *
+ * ------------------------------------------------------------------------
+ *
+ * 0.0.2d (26/10/2000)
+ * - (scsi) use implicit segment recounting for all hba's
+ * - fix speed setting, was consistently off on most drives
+ * - only print capacity when opening for write
+ * - fix off-by-two error in getting/setting write+read speed (affected
+ * reporting as well as actual speed used)
+ * - possible to enable write caching on drive
+ * - do ioctl marshalling on sparc64 from Ben Collins <bcollins@debian.org>
+ * - avoid unaligned access on flags, should have been unsigned long of course
+ * - fixed missed wakeup in kpacketd
+ * - b_dev error (two places)
+ * - fix buffer head b_count bugs
+ * - fix hole merge bug, where tail could be added twice
+ * - fsync and invalidate buffers on close
+ * - check hash table for buffers first before using our own
+ * - add read-ahead
+ * - fixed several list races
+ * - fix proc reporting for more than one device
+ * - change to O_CREAT for creating devices
+ * - added media_change hook
+ * - added free buffers config option
+ * - pkt_lock_tray fails on failed open (and oopses), remove it. unlock
+ * is done explicitly in pkt_remove dev anyway.
+ * - added proper elevator insertion (should probably be part of elevator.c)
+ * - moved kernel thread info to private device, spawn one for each writer
+ * - added separate buffer list for dirty packet buffers
+ * - fixed nasty data corruption bug
+ * - remember to account request even when we don't gather data for it
+ * - add ioctl to force wakeup of kernel thread (for debug)
+ * - fixed packet size setting bug on zero detected
+ * - changed a lot of the proc reporting to be more readable to "humans"
+ * - set full speed for read-only opens
+ *
+ * 0.0.2c (08/09/2000)
+ * - inc usage count of buffer heads
+ * - add internal buffer pool to avoid deadlock on oom
+ * - gather data for as many buffers as we have, before initiating write. this
+ * allows the laser to stay on longer, giving better performance.
+ * - fix always busy when tray can't be locked
+ * - remove request duplication nastiness, inject directly into the target
+ * - adapted to devfs and elevator changes
+ * - added proc interface
+ *
+ * 0.0.2b (21/06/2000)
+ * - fix io_request_lock typos (missing '&')
+ * - grab pkt_sem before invoking pkt_handle_queue
+ * - SCSI uses queuedata too, mirror that in pd->queuedata (hack)
+ * - remove SCSI sr debug messages
+ * - really activate empty block querying (requires cvs UDF, CDRW branch)
+ * - make sure sync_buffers doesn't consider us, or we can deadlock
+ * - make sure people don't swap on us (for now ;)
+ *
+ * 0.0.2a (19/06/2000)
+ * - add kpacketd kernel thread to handle actual data gathering
+ * - pd->pkt_dev is now real device, not just minor
+ * - add support for super_operations block_empty fn, to query fs for
+ * unused blocks that don't need reading
+ * - "cache" blocks that are contained in the UDF file/dir packet
+ * - rewrite pkt_gather_data to a one-step solution
+ * - add private pktcdvd elevator
+ * - shutdown write access to device upon write failure
+ * - fix off-by-one bug in capacity
+ * - setup sourceforge project (packet-cd.sourceforge.net)
+ * - add more blk ioctls to pkt_ioctl
+ * - set inactive request queue head
+ * - change panic calls to BUG, better with kdb
+ * - have pkt_gather_data check correct block size and kill rq if wrong
+ * - rework locking
+ * - introduce per-pd queues, simplifies pkt_request
+ * - store pd in queuedata
+ *
+ *************************************************************************/
+
+#define VERSION_CODE "v0.0.2f 21/03/2001 Jens Axboe (axboe@suse.de)"
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/malloc.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/locks.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/file.h>
+#include <linux/blk.h>
+#include <linux/blkpg.h>
+#include <linux/cdrom.h>
+#include <linux/ide.h>
+#include <linux/smp_lock.h>
+#include <linux/pktcdvd.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/unaligned.h>
+#include <asm/uaccess.h>
+
+#define SCSI_IOCTL_SEND_COMMAND 1
+
+/*
+ * 32 buffers of 2048 bytes
+ */
+#define PACKET_MAX_SIZE 32
+
+#define NEXT_BH(bh, nbh) (((bh)->b_rsector + ((bh)->b_size >> 9)) == (nbh)->b_rsector)
+
+#define BH_IN_ORDER(b1, b2) ((b1)->b_rsector < (b2)->b_rsector)
+
+#define CONTIG_BH(b1, b2) ((b1)->b_data + (b1)->b_size == (b2)->b_data)
+
+#define ZONE(sector, pd) \
+ (((sector) + ((pd)->offset)) - (((sector) + ((pd)->offset)) % ((pd)->settings.size)))
+
+static int *pkt_sizes;
+static int *pkt_blksize;
+static int *pkt_readahead;
+static struct pktcdvd_device *pkt_devs;
+
+/*
+ * a bit of a kludge, but we want to be able to pass both real and packet
+ * dev and get the right one.
+ */
+static inline struct pktcdvd_device *pkt_find_dev(kdev_t dev)
+{
+ int i;
+
+ for (i = 0; i < MAX_WRITERS; i++)
+ if (pkt_devs[i].dev == dev || pkt_devs[i].pkt_dev == dev)
+ return &pkt_devs[i];
+
+ return NULL;
+}
+
+static void pkt_recheck_segments(struct request *rq)
+{
+ struct buffer_head *bh;
+ int nr_segments = 1, sectors;
+
+ bh = rq->bh;
+ sectors = bh->b_size >> 9;
+
+ while (bh->b_reqnext) {
+ if (!CONTIG_BH(bh, bh->b_reqnext))
+ nr_segments++;
+ bh = bh->b_reqnext;
+ sectors += bh->b_size >> 9;
+ }
+
+ rq->nr_segments = nr_segments;
+
+ if (sectors != rq->nr_sectors) {
+ printk("tell jens, %u != %lu\n", sectors, rq->nr_sectors);
+ BUG();
+ }
+}
+
+/*
+ * The following three functions are the plugins to the ll_rw_blk
+ * layer and decides whether a given request / buffer head can be
+ * merged. We differ in a couple of ways from "normal" block
+ * devices:
+ *
+ * - don't merge when the buffer / request crosses a packet block
+ * boundary
+ * - merge buffer head even though it can't be added directly to the
+ * front or back of the list. this gives us better performance, since
+ * what would otherwise require multiple requests can now be handled
+ * in one (hole merging)
+ * - we only care about write merging, reads use device original defaults.
+ *
+ * The device original merge_ functions are stored in the packet device
+ * queue (pd->q)
+ *
+ */
+static inline int pkt_do_merge(request_queue_t *q, struct request *rq,
+ struct buffer_head *bh, int max_segs,
+ merge_request_fn *merge_fn,
+ struct pktcdvd_device *pd)
+{
+ void *ptr = q->queuedata;
+ int ret;
+
+#if 0
+ VPRINTK("pkt_do_merge: cmd=%d\n", rq->cmd);
+#endif
+
+ if (rq->cmd == WRITE && ZONE(rq->sector, pd) != ZONE(bh->b_rsector, pd))
+ return ELEVATOR_NO_MERGE;
+
+ q->queuedata = pd->cdrw.queuedata;
+ ret = merge_fn(q, rq, bh, max_segs);
+ q->queuedata = ptr;
+ return ret;
+}
+
+static int pkt_front_merge_fn(request_queue_t *q, struct request *rq,
+ struct buffer_head *bh, int max_segs)
+{
+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+
+#if 0
+ VPRINTK("front_merge_fn: cmd=%d\n", rq->cmd);
+#endif
+
+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.front_merge_fn, pd);
+}
+
+static int pkt_back_merge_fn(request_queue_t *q, struct request *rq,
+ struct buffer_head *bh, int max_segs)
+{
+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+
+#if 0
+ VPRINTK("back_merge: cmd=%d\n", rq->cmd);
+#endif
+
+ return pkt_do_merge(q, rq, bh, max_segs, pd->cdrw.back_merge_fn, pd);
+}
+
+/*
+ * rules similar to above
+ */
+static int pkt_merge_requests_fn(request_queue_t *q, struct request *rq,
+ struct request *next, int max_segs)
+{
+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+ struct packet_cdrw *cdrw = &pd->cdrw;
+ void *ptr = q->queuedata;
+ int ret;
+
+#if 0
+ VPRINTK("merge_requests: cmd=%d\n", rq->cmd);
+#endif
+
+ if (ZONE(rq->sector, pd) != ZONE(next->sector + next->nr_sectors, pd))
+ return 0;
+
+ q->queuedata = cdrw->queuedata;
+ ret = cdrw->merge_requests_fn(q, rq, next, max_segs);
+ q->queuedata = ptr;
+ return ret;
+}
+
+static int pkt_grow_bhlist(struct pktcdvd_device *pd, int count)
+{
+ struct packet_cdrw *cdrw = &pd->cdrw;
+ struct buffer_head *bh;
+ int i = 0;
+ char *data;
+
+#if 0
+ VPRINTK("grow_bhlist: count=%d\n", count);
+#endif
+
+ data = kmalloc(CD_FRAMESIZE*count, GFP_KERNEL);
+ if (!data)
+ return 0;
+
+ while (i < count) {
+ bh = kmalloc(sizeof(struct buffer_head), GFP_KERNEL);
+ if (bh == NULL)
+ break;
+
+#if 0
+ bh->b_data = kmalloc(CD_FRAMESIZE, GFP_KERNEL);
+ if (bh->b_data == NULL) {
+ kfree(bh);
+ break;
+ }
+#else
+ bh->b_data = &data[CD_FRAMESIZE*i];
+#endif
+ spin_lock_irq(&pd->lock);
+ bh->b_pprev = &cdrw->bhlist;
+ bh->b_next = cdrw->bhlist;
+ cdrw->bhlist = bh;
+ spin_unlock_irq(&pd->lock);
+
+ bh->b_size = CD_FRAMESIZE;
+ bh->b_list = PKT_BUF_LIST;
+ atomic_inc(&cdrw->free_bh);
+ i++;
+ }
+ return i;
+}
+
+static int pkt_shrink_bhlist(struct pktcdvd_device *pd, int count)
+{
+ struct packet_cdrw *cdrw = &pd->cdrw;
+ struct buffer_head *bh;
+ int i = 0;
+
+#if 0
+ VPRINTK("shrink_bhlist: count=%d\n", count);
+#endif
+
+ while ((i < count) && cdrw->bhlist) {
+ spin_lock_irq(&pd->lock);
+ bh = cdrw->bhlist;
+ cdrw->bhlist = bh->b_next;
+ spin_unlock_irq(&pd->lock);
+ if (bh->b_list != PKT_BUF_LIST)
+ BUG();
+ kfree(bh->b_data);
+ kfree(bh);
+ atomic_dec(&cdrw->free_bh);
+ i++;
+ }
+ return i;
+}
+
+static request_queue_t *pkt_return_queue(kdev_t dev)
+{
+ struct pktcdvd_device *pd = pkt_find_dev(dev);
+
+ return &pd->cdrw.r_queue;
+}
+
+static void pkt_end_io_read(struct buffer_head *bh, int uptodate)
+{
+#if 0
+ VPRINTK("end_io_read: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
+#endif
+
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+}
+
+/*
+ * if the buffer is already in the buffer cache, grab it if we can lock
+ * it down
+ */
+static inline struct buffer_head *pkt_get_hash(kdev_t dev, unsigned long block,
+ int size)
+{
+#if 0
+ struct buffer_head *bh;
+
+ if ((bh = get_hash_table(dev, block, size))) {
+ if (!test_and_set_bit(BH_Lock, &bh->b_state)) {
+ atomic_set_buffer_clean(bh);
+ return bh;
+ }
+ printk("buffer %lu was already locked\n", bh->b_rsector);
+ brelse(bh);
+ }
+#endif
+
+ return NULL;
+}
+
+static void pkt_end_io_write(struct buffer_head *, int);
+
+static struct buffer_head *pkt_get_buffer(struct pktcdvd_device *pd,
+ unsigned long sector, int size)
+{
+ struct buffer_head *bh;
+
+#if 0
+ VPRINTK("get_buffer: sector=%ld, size=%d\n", sector, size);
+#endif
+
+ if ((bh = pkt_get_hash(pd->pkt_dev, sector / (size >> 9), size))) {
+ bh->b_private = pd;
+ bh->b_end_io = pkt_end_io_write;
+ goto out;
+ }
+
+ /*
+ * should not happen...
+ */
+ if (!atomic_read(&pd->cdrw.free_bh)) {
+ printk("pktcdvd: no buffers available!\n");
+ BUG();
+ }
+
+ atomic_dec(&pd->cdrw.free_bh);
+ atomic_inc(&pd->cdrw.pending_bh);
+
+ spin_lock_irq(&pd->lock);
+ bh = pd->cdrw.bhlist;
+ pd->cdrw.bhlist = bh->b_next;
+ bh->b_next = NULL;
+ spin_unlock_irq(&pd->lock);
+
+ init_buffer(bh, pkt_end_io_read, pd);
+
+ bh->b_next_free = NULL;
+ bh->b_prev_free = NULL;
+ bh->b_this_page = NULL;
+ bh->b_pprev = NULL;
+ bh->b_reqnext = NULL;
+
+ init_waitqueue_head(&bh->b_wait);
+ atomic_set(&bh->b_count, 1);
+ bh->b_blocknr = sector / (size >> 9);
+ bh->b_list = PKT_BUF_LIST;
+ bh->b_state = (1 << BH_Mapped) | (1 << BH_Lock) | (1 << BH_Req);
+
+out:
+ bh->b_rsector = sector;
+#if 0
+ bh->b_rdev = pd->pkt_dev;
+ bh->b_dev = pd->pkt_dev;
+#else
+ bh->b_rdev = pd->dev;
+ bh->b_dev = pd->dev;
+#endif
+ return bh;
+}
+
+static void pkt_put_buffer(struct buffer_head *bh)
+{
+ struct pktcdvd_device *pd = bh->b_private;
+ unsigned long flags;
+
+#if 0
+ VPRINTK("put_buffer: bh=%ld\n", bh->b_blocknr);
+#endif
+
+ if (bh->b_list != PKT_BUF_LIST)
+ BUG();
+
+ if (atomic_read(&bh->b_count))
+ printk("pktcdvd: put_buffer: busy buffer\n");
+
+ bh->b_private = NULL;
+ bh->b_state = 0;
+ bh->b_reqnext = NULL;
+
+ spin_lock_irqsave(&pd->lock, flags);
+ bh->b_next = pd->cdrw.bhlist;
+ pd->cdrw.bhlist = bh;
+ spin_unlock_irqrestore(&pd->lock, flags);
+ atomic_inc(&pd->cdrw.free_bh);
+ atomic_dec(&pd->cdrw.pending_bh);
+}
+
+/*
+ * we use this as our default b_end_io handler, since we need to take
+ * the entire request off the list if just one of the clusters fails.
+ * later on this should also talk to UDF about relocating blocks -- for
+ * now we just drop the rq entirely. when doing the relocating we must also
+ * lock the bh down to ensure that we can easily reconstruct the write should
+ * it fail.
+ */
+static void pkt_end_io_write(struct buffer_head *bh, int uptodate)
+{
+ struct pktcdvd_device *pd = (struct pktcdvd_device *) bh->b_private;
+
+#if 0
+ VPRINTK("end_io_write: bh=%ld, uptodate=%d\n", bh->b_blocknr, uptodate);
+#endif
+
+ atomic_set_buffer_clean(bh);
+ clear_bit(BH_Req, &bh->b_state);
+
+ mark_buffer_uptodate(bh, uptodate);
+ unlock_buffer(bh);
+
+ if (bh->b_list == PKT_BUF_LIST) {
+ brelse(bh);
+ pkt_put_buffer(bh);
+ }
+
+ /*
+ * obviously, more needs to be done here.
+ */
+ if (!uptodate) {
+ printk("pktcdvd: %s: write error\n", pd->name);
+ set_bit(PACKET_READONLY, &pd->flags);
+ }
+ pd->stats.bh_e++;
+}
+
+static void pkt_init_bh(struct pktcdvd_device *pd, struct request *rq)
+{
+ struct buffer_head *bh = rq->bh;
+ unsigned cnt = 0;
+
+#if 0
+ VPRINTK("init_bh: cmd=%d, bh=%ld\n", rq->cmd, bh->b_blocknr);
+#endif
+
+ while (bh) {
+#if 1
+ if (bh->b_list == PKT_BUF_LIST) {
+ bh->b_private = pd;
+ bh->b_end_io = pkt_end_io_write;
+ blk_started_io(bh->b_size >> 9);
+ }
+#else
+ bh->b_end_io = pkt_end_io_write;
+ bh->b_private = pd;
+#endif
+
+ /*
+ * the buffer better be uptodate, mapped, and locked!
+ */
+ if (!buffer_uptodate(bh))
+ printk("%lu not uptodate\n", bh->b_rsector);
+ if (!buffer_locked(bh))
+ printk("%lu not locked\n", bh->b_rsector);
+ if (!buffer_mapped(bh))
+ printk("%lu not mapped\n", bh->b_rsector);
+
+ /*
+ * if this happens, do report
+ */
+ if (bh->b_reqnext) {
+ if ((bh->b_rsector + (bh->b_size >> 9)) != bh->b_reqnext->b_rsector)
+ printk("tell jens, %lu follows %lu\n", bh->b_reqnext->b_rsector, bh->b_rsector);
+ if (bh->b_rsector >= bh->b_reqnext->b_rsector)
+
+ printk("tell jens, order %lu >= %lu\n", bh->b_rsector, bh->b_reqnext->b_rsector);
+ }
+ bh = bh->b_reqnext;
+ cnt += rq->current_nr_sectors;
+ }
+
+ if (cnt != rq->nr_sectors) {
+ printk("botched request %u (%lu)\n", cnt, rq->nr_sectors);
+ BUG();
+ }
+}
+
+/*
+ * really crude stats for now...
+ */
+static void pkt_account_rq(struct pktcdvd_device *pd, int read, int written,
+ int bs)
+{
+ pd->stats.bh_s += (written / bs);
+ pd->stats.secs_w += written;
+ pd->stats.secs_r += read;
+}
+
+/*
+ * does request span two packets? 0 == yes, 1 == no
+ */
+static int pkt_same_zone(struct pktcdvd_device *pd, struct request *rq)
+{
+ if (!pd->settings.size)
+ return 0;
+
+ return ZONE(rq->sector, pd) == ZONE(rq->sector + rq->nr_sectors -1, pd);
+}
+
+#if defined(CDROM_CDROM_PKTCDVD_BLOCKFREE)
+static void pkt_init_buffer(struct buffer_head *bh)
+{
+ set_bit(BH_Uptodate, &bh->b_state);
+ set_bit(BH_Dirty, &bh->b_state);
+ memset(bh->b_data, 0, bh->b_size);
+}
+
+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+ struct super_block *sb = get_super(pd->pkt_dev);
+ struct super_operations *sop = sb ? sb->s_op : NULL;
+ unsigned long packet = 0, blocknr = bh->b_blocknr;
+
+ if (sop && sop->block_empty) {
+ if (sop->block_empty(sb, blocknr, &packet)) {
+ pkt_init_buffer(pd, bh);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+#else /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */
+
+static int pkt_sb_empty(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+ return 0;
+}
+
+#endif /* defined(CDROM_CDROM_PKTCDVD_BLOCKFREE) */
+
+/*
+ * basically just does a ll_rw_block for the bhs given to us, but we
+ * don't return until we have them.
+ */
+static void pkt_read_bh(struct pktcdvd_device *pd, struct buffer_head *bh)
+{
+ /*
+ * UDF says it's empty, woohoo
+ */
+ if (pkt_sb_empty(pd, bh))
+ return;
+
+ generic_make_request(READ, bh);
+}
+
+inline void __pkt_kill_request(struct request *rq, int uptodate, char *name)
+{
+ while (end_that_request_first(rq, uptodate, name))
+ ;
+ end_that_request_last(rq);
+}
+
+
+void pkt_kill_request(struct request *rq, int uptodate, char *name, char *msg)
+{
+ printk("pktcdvd: killing request, reason: %s\n", msg);
+ spin_lock_irq(&io_request_lock);
+ __pkt_kill_request(rq, uptodate, name);
+ spin_unlock_irq(&io_request_lock);
+}
+
+/*
+ * fill in the holes of a request
+ *
+ * Returns: 0, keep 'em coming -- 1, stop queueing
+ */
+static int pkt_gather_data(struct pktcdvd_device *pd, struct request *rq)
+{
+ unsigned long start_s, end_s, sector;
+ struct buffer_head *bh;
+ unsigned int sectors;
+ struct buffer_head *bhs[PACKET_MAX_SIZE] = { NULL, };
+
+ /*
+ * all calculations are done with 512 byte sectors
+ */
+ sectors = pd->settings.size - rq->nr_sectors;
+ start_s = rq->sector - (rq->sector % pd->settings.size);
+ end_s = start_s + pd->settings.size;
+
+ VPRINTK("pkt_gather_data: cmd=%d\n", rq->cmd);
+ VPRINTK("need %d sectors for %s\n", sectors, kdevname(pd->dev));
+ VPRINTK("from %lu to %lu ", start_s, end_s);
+ VPRINTK("(%lu - %lu)\n", rq->bh->b_rsector, rq->bhtail->b_rsector +
+ rq->current_nr_sectors);
+
+ if (blksize_size[MAJOR(pd->dev)]) {
+ if (rq->bh->b_size != blksize_size[MAJOR(pd->dev)][MINOR(pd->dev)]) {
+ printk("pktcdvd: wrong (%u) block size\n", rq->bh->b_size);
+ pkt_kill_request(rq, 0, pd->name, "eek");
+ pd_lock(pd, 0);
+ pd->rq = NULL;
+ pd_unlock(pd);
+ return 1;
+ }
+ }
+
+ /*
+ * get remaining blocks
+ */
+ bh = rq->bh;
+#if 0
+ for (sector = (rq->bh->b_rsector % pd->settings.size);
+ sector < ((rq->bhtail->b_rsector + rq->current_nr_sectors) % pd->settings.size);
+ sector += (rq->bh->b_size >> 9))
+ {
+ bhs[sector / (bh->b_size >> 9)] = bh;
+ bh = bh->b_reqnext;
+ }
+#else
+ while (bh)
+ {
+ bhs[(bh->b_rsector % pd->settings.size) / (bh->b_size >> 9)] = bh;
+ bh = bh->b_reqnext;
+ }
+#endif
+
+ for (sector=start_s; sector<end_s; sector+=(rq->bh->b_size >> 9))
+ {
+ if (bhs[(sector % pd->settings.size) / (rq->bh->b_size >> 9)] == NULL)
+ {
+ bh = pkt_get_buffer(pd, sector, rq->bh->b_size);
+
+ if (!buffer_uptodate(bh))
+ {
+ VPRINTK("reading buffer %lu (%ld)\n",
+ bh->b_rsector,
+ bh->b_blocknr);
+ pkt_read_bh(pd, bh);
+ }
+
+ bhs[(sector % pd->settings.size) / (rq->bh->b_size >> 9)] = bh;
+ rq->nr_sectors += (rq->bh->b_size >> 9);
+ }
+ }
+
+
+ for (sector=PACKET_MAX_SIZE; sector>0; sector--)
+ {
+ if (!buffer_uptodate(bhs[sector-1]))
+ {
+ VPRINTK("waiting on buffer %lu (%ld)\n",
+ bhs[sector-1]->b_rsector,
+ bhs[sector-1]->b_blocknr);
+ lock_buffer(bhs[sector-1]);
+ if (!buffer_uptodate(bhs[sector-1]))
+ printk("huh, %lu not uptodate\n", bhs[sector-1]->b_rsector);
+ }
+ else if (!buffer_locked(bhs[sector-1]))
+ lock_buffer(bhs[sector-1]);
+
+ if (sector != PACKET_MAX_SIZE)
+ bhs[sector-1]->b_reqnext = bhs[sector];
+ else
+ bhs[sector-1]->b_reqnext = NULL;
+ }
+
+ rq->bh = bhs[0];
+ rq->bhtail = bhs[PACKET_MAX_SIZE-1];
+ rq->buffer = rq->bh->b_data;
+ rq->current_nr_sectors = rq->bh->b_size >> 9;
+ rq->hard_nr_sectors = rq->nr_sectors;
+ rq->sector = rq->hard_sector = start_s;
+// rq->cmd = WRITE_PACKET;
+
+ VPRINTK("unlocked last %lu\n", rq->bhtail->b_rsector);
+ pkt_recheck_segments(rq);
+ pkt_init_bh(pd, rq);
+ pkt_account_rq(pd, sectors, rq->nr_sectors, rq->current_nr_sectors);
+
+ /*
+ * sanity check
+ */
+ if (rq->nr_sectors != pd->settings.size) {
+ printk("pktcdvd: request mismatch %lu (should be %u)\n",
+ rq->nr_sectors, pd->settings.size);
+ BUG();
+ }
+ return 0;
+}
+
+static void pkt_rq_end_io(struct request *rq)
+{
+ struct pktcdvd_device *pd = pkt_find_dev(rq->rq_dev);
+
+ if (rq->cmd == WRITE_PACKET)
+ rq->cmd = WRITE;
+
+#if 0
+ VPRINTK("pkt_rq_end_io: cmd=%d, q=%p\n", rq->cmd, rq->q);
+#endif
+
+ pd_lock(pd, 1);
+
+ if (pd->rq == NULL)
+ printk("rq_end_io: no current rq\n");
+
+ pd->rq = NULL;
+ rq->end_io = NULL;
+
+ if (!test_and_clear_bit(PACKET_BUSY, &pd->flags))
+ printk("rq_end_io: BUSY not set\n");
+
+ if (!test_and_clear_bit(PACKET_RQ, &pd->flags))
+ printk("rq_end_io: RQ not set\n");
+
+ pd_unlock(pd);
+ wake_up(&pd->wqueue);
+}
+
+static inline void __pkt_inject_request(request_queue_t *q, struct request *req)
+{
+ struct list_head *head = &q->queue_head;
+
+#if 0
+ VPRINTK("__pkt_inject_request: list_empty == %d, size=%d, cmd=%d\n",
+ list_empty(&q->queue_head), req->bh->b_size >> 9, req->cmd);
+#endif
+
+ if (list_empty(&q->queue_head))
+ q->plug_device_fn(q, req->rq_dev);
+ else if (q->head_active && !q->plugged)
+ head = head->next;
+
+ if (!q->plugged && q->head_active && head == &q->queue_head)
+ {
+ spin_unlock_irq(&io_request_lock);
+ BUG();
+ }
+
+ list_add(&req->queue, head);
+}
+
+static void pkt_inject_request(request_queue_t *q, struct request *rq)
+{
+ rq->end_io = pkt_rq_end_io;
+ spin_lock_irq(&io_request_lock);
+ __pkt_inject_request(q, rq);
+ spin_unlock_irq(&io_request_lock);
+}
+
+/*
+ * Returns: 1, keep 'em coming -- 0, wait for wakeup
+ */
+static int pkt_do_request(struct pktcdvd_device *pd, struct request *rq,
+ request_queue_t *pdq)
+{
+ int ret;
+
+#if 0
+ VPRINTK("do_request: bh=%ld, nr_sectors=%ld, size=%d, cmd=%d\n", rq->bh->b_blocknr, rq->nr_sectors, pd->settings.size, rq->cmd);
+#endif
+
+ /*
+ * perfect match. the merge_* functions have already made sure that
+ * a request doesn't cross a packet boundary, so if the sector
+ * count matches it's good.
+ */
+ if (rq->nr_sectors == pd->settings.size) {
+// rq->cmd = WRITE_PACKET;
+ pkt_account_rq(pd, 0, rq->nr_sectors, rq->current_nr_sectors);
+ return 0;
+ }
+
+ /*
+ * paranoia...
+ */
+ if (rq->nr_sectors > pd->settings.size) {
+ printk("pktcdvd: request too big! BUG! %lu\n", rq->nr_sectors);
+ BUG();
+ }
+
+ ret = pkt_gather_data(pd, rq);
+ if (ret) {
+ clear_bit(PACKET_RQ, &pd->flags);
+ clear_bit(PACKET_BUSY, &pd->flags);
+ pd_lock(pd, 0);
+ pd->rq = NULL;
+ pd_unlock(pd);
+ }
+ return ret;
+}
+
+/*
+ * handle the requests that got queued for this writer
+ *
+ * Locks: none
+ *
+ */
+static int pkt_handle_queue(struct pktcdvd_device *pd, request_queue_t *q)
+{
+ struct request *rq;
+ int ret;
+
+#if 0
+ VPRINTK("handle_queue\n");
+#endif
+
+ pd_lock(pd, 0);
+
+ /*
+ * nothing for us to do
+ */
+ if (!test_bit(PACKET_RQ, &pd->flags)) {
+ pd_unlock(pd);
+ return 1;
+ }
+
+ if (test_and_set_bit(PACKET_BUSY, &pd->flags)) {
+ pd_unlock(pd);
+ return 1;
+ }
+
+ rq = pd->rq;
+
+ pd_unlock(pd);
+
+ /*
+ * nothing to do
+ */
+ ret = 1;
+ if (rq == NULL) {
+ printk("handle_queue: pd BUSY+RQ, but no rq\n");
+ clear_bit(PACKET_RQ, &pd->flags);
+ goto out;
+ }
+
+ /*
+ * reads are shipped directly to cd-rom, so they should not
+ * pop up here
+ */
+ if (rq->cmd == READ)
+ BUG();
+
+ if ((rq->current_nr_sectors << 9) != CD_FRAMESIZE) {
+ pkt_kill_request(rq, 0, pd->name, "wrong size");
+ clear_bit(PACKET_RQ, &pd->flags);
+ pd->rq = NULL;
+ goto out;
+ }
+
+ if (!pkt_do_request(pd, rq, q)) {
+ pkt_inject_request(q, rq);
+ return 0;
+ }
+
+out:
+ clear_bit(PACKET_BUSY, &pd->flags);
+ return ret;
+}
+
+/*
+ * kpacketd is woken up, when writes have been queued for one of our
+ * registered devices
+ */
+static int kcdrwd(void *foobar)
+{
+	struct pktcdvd_device *pd = foobar;
+	request_queue_t *q, *my_queue;
+
+	set_bit(PACKET_THREAD, &pd->flags);
+	daemonize();
+	exit_files(current);
+
+	printk("pktcdvd: kernel thread %s started\n", pd->name);
+
+	current->session = 1;
+	current->pgrp = 1;
+	current->policy = SCHED_OTHER;
+	current->nice = -20;
+	/* use "%s" so a stray '%' in the name can never act as a format */
+	sprintf(current->comm, "%s", pd->name);
+
+	/* block everything except SIGKILL -- that is our teardown signal */
+	spin_lock_irq(&current->sigmask_lock);
+	siginitsetinv(&current->blocked, sigmask(SIGKILL));
+	flush_signals(current);
+	spin_unlock_irq(&current->sigmask_lock);
+
+	q = blk_get_queue(pd->dev);
+	my_queue = blk_get_queue(pd->pkt_dev);
+
+	for (;;) {
+		DECLARE_WAITQUEUE(wait, current);
+
+		add_wait_queue(&pd->wqueue, &wait);
+
+		/*
+		 * if pkt_handle_queue returns true, we can queue
+		 * another request. otherwise we need to unplug the
+		 * cd-rom queue and wait for buffers to be flushed
+		 * (which will then wake us up again when done).
+		 */
+		do {
+			if (!pkt_handle_queue(pd, q))
+				break;
+
+			spin_lock_irq(&io_request_lock);
+			if (list_empty(&my_queue->queue_head)) {
+				spin_unlock_irq(&io_request_lock);
+				break;
+			}
+			my_queue->request_fn(my_queue);
+			spin_unlock_irq(&io_request_lock);
+		} while (1);
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		VPRINTK("before generic_unplug_device\n");
+		generic_unplug_device(q);
+		VPRINTK("after generic_unplug_device\n");
+
+		schedule();
+		remove_wait_queue(&pd->wqueue, &wait);
+
+		/*
+		 * got SIGKILL
+		 */
+		if (signal_pending(current)) {
+			printk("pktcdvd: thread got SIGKILL\n");
+			break;
+		}
+
+	}
+	printk("pktcdvd: kernel thread %s stopped\n", pd->name);
+	clear_bit(PACKET_THREAD, &pd->flags);
+	return 0;
+}
+
+/*
+ * our request function.
+ *
+ * - reads are just tossed directly to the device, we don't care.
+ * - writes, regardless of size, are added as the current pd rq and
+ * kcdrwd is woken up to handle it. kcdrwd will also make sure to
+ * reinvoke this request handler, once the given request has been
+ * processed.
+ *
+ * Locks: io_request_lock held
+ *
+ * Notes: all writers have their own queue, so all requests are for the
+ * the same device
+ */
+static void pkt_request(request_queue_t *q)
+{
+	struct pktcdvd_device *pd = (struct pktcdvd_device *) q->queuedata;
+	request_queue_t *pdq = NULL;
+	int max_segments = MAX_SEGMENTS;
+
+#if 0
+	VPRINTK("pkt_request\n");
+#endif
+
+	if (list_empty(&q->queue_head))
+		return;
+
+	/* queue of the underlying cd-rom device -- reads are diverted there */
+	pdq = __blk_get_queue(pd->dev);
+
+	while (!list_empty(&q->queue_head)) {
+		struct request *rq = blkdev_entry_next_request(&q->queue_head);
+
+		/* from here on the request addresses the real cd-rom device */
+		rq->rq_dev = pd->dev;
+
+		if (rq->cmd == READ) {
+			blkdev_dequeue_request(rq);
+			__pkt_inject_request(pdq, rq);
+			continue;
+		}
+
+		/*
+		 * UDF had a bug, where it submitted a write to a ro file
+		 * system, this is just to prevent accidents like that from
+		 * happening again
+		 */
+		if (test_bit(PACKET_READONLY, &pd->flags)) {
+			blkdev_dequeue_request(rq);
+			__pkt_kill_request(rq, 0, pd->name);
+			continue;
+		}
+
+		/*
+		 * paranoia, shouldn't trigger...
+		 */
+		if (!pkt_same_zone(pd, rq))
+			BUG();
+
+		pd_lock(pd, 1);
+
+		/*
+		 * already gathering data for another write. the
+		 * rfn will be reinvoked once that is done
+		 */
+		if (test_and_set_bit(PACKET_RQ, &pd->flags))
+		{
+			/* same packet zone as the pending request: try a merge */
+			if (ZONE(pd->rq->sector, pd) == ZONE(rq->sector, pd))
+			{
+				struct request *trq = pd->rq;
+
+				/* keep the lower-sector request as pd->rq */
+				if (pd->rq->sector > rq->sector)
+				{
+					pd->rq = rq;
+					rq = trq;
+				}
+
+				if (q->merge_requests_fn(q, pd->rq, rq, max_segments))
+				{
+					q->elevator.elevator_merge_req_fn(pd->rq, rq);
+					/* buffers physically contiguous: they collapse into one segment */
+					if (pd->rq->bhtail->b_data + pd->rq->bhtail->b_size == rq->bh->b_data)
+						pd->rq->nr_segments --;
+					pd->rq->nr_segments += rq->nr_segments - 1;
+					pd->rq->bhtail->b_reqnext = rq->bh;
+					pd->rq->bhtail = rq->bhtail;
+					pd->rq->nr_sectors = pd->rq->hard_nr_sectors += rq->hard_nr_sectors;
+					list_del(&rq->queue);
+					pd->rq->end_io = rq->end_io;
+					blkdev_dequeue_request(rq);
+					blkdev_release_request(rq);
+					pd_unlock(pd);
+					continue;
+				}
+
+			}
+			pd_unlock(pd);
+			break;
+		}
+
+		pd->rq = rq;
+		pd_unlock(pd);
+		blkdev_dequeue_request(rq);
+	}
+
+	/* let kcdrwd process whatever we queued / merged above */
+	wake_up(&pd->wqueue);
+}
+
+/* log the negotiated write settings in one line */
+static void pkt_print_settings(struct pktcdvd_device *pd)
+{
+	printk("pktcdvd: %s packets, %u blocks / packet, Mode-%c disc\n",
+		pd->settings.fp ? "Fixed" : "Variable",
+		pd->settings.size >> 2,
+		pd->settings.block_mode == 8 ? '1' : '2');
+}
+
+/*
+ * Dump a SCSI/ATAPI sense buffer in human-readable form.  A generic
+ * sense dump / resolve mechanism should really be implemented across
+ * all ATAPI + SCSI devices.
+ */
+static void pkt_dump_sense(struct request_sense *sense)
+{
+	static const char *sense_key_text[9] = {
+		"No sense", "Recovered error", "Not ready",
+		"Medium error", "Hardware error", "Illegal request",
+		"Unit attention", "Data protect", "Blank check"
+	};
+
+	if (sense == NULL)
+		return;
+
+	if (sense->sense_key > 8) {
+		printk("pktcdvd: sense invalid\n");
+		return;
+	}
+
+	printk("pktcdvd: sense category %s asc(%02x), ascq(%02x)\n",
+		sense_key_text[sense->sense_key], sense->asc, sense->ascq);
+}
+
+/*
+ * Build and send a MODE SELECT with the write parameters page, based on
+ * pd->settings.  Returns 0 on success, a cdrom_mode_sense/select error
+ * otherwise.  (The wp assignment below was split across two lines by
+ * mail wrapping in the posted patch; rejoined here.)
+ */
+static int pkt_set_write_settings(struct pktcdvd_device *pd)
+{
+	struct cdrom_device_info *cdi = pd->cdi;
+	struct cdrom_generic_command cgc;
+	write_param_page *wp;
+	char buffer[128];
+	int ret, size;
+
+	/* first sense just to learn the page size and block descriptor offset */
+	memset(buffer, 0, sizeof(buffer));
+	init_cdrom_command(&cgc, buffer, sizeof(*wp), CGC_DATA_READ);
+	if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
+		return ret;
+
+	size = 2 + ((buffer[0] << 8) | (buffer[1] & 0xff));
+	pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff);
+	if (size > sizeof(buffer))
+		size = sizeof(buffer);
+
+	/*
+	 * now get it all
+	 */
+	init_cdrom_command(&cgc, buffer, size, CGC_DATA_READ);
+	if ((ret = cdrom_mode_sense(cdi, &cgc, GPMODE_WRITE_PARMS_PAGE, 0)))
+		return ret;
+
+	/*
+	 * write page is offset header + block descriptor length
+	 */
+	wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset];
+
+	wp->fp = pd->settings.fp;
+	wp->track_mode = pd->settings.track_mode;
+	wp->write_type = pd->settings.write_type;
+	wp->data_block_type = pd->settings.block_mode;
+
+	wp->multi_session = 0;
+
+#ifdef PACKET_USE_LS
+	wp->link_size = 7;
+	wp->ls_v = 1;
+#endif
+
+	if (wp->data_block_type == PACKET_BLOCK_MODE1) {
+		wp->session_format = 0;
+		wp->subhdr2 = 0x20;
+	} else if (wp->data_block_type == PACKET_BLOCK_MODE2) {
+		wp->session_format = 0x20;
+		wp->subhdr2 = 8;
+#if 0
+		wp->mcn[0] = 0x80;
+		memcpy(&wp->mcn[1], PACKET_MCN, sizeof(wp->mcn) - 1);
+#endif
+	} else {
+		/*
+		 * paranoia
+		 */
+		printk("pktcdvd: write mode wrong %d\n", wp->data_block_type);
+		return 1;
+	}
+	wp->packet_size = cpu_to_be32(pd->settings.size >> 2);
+
+	cgc.buflen = cgc.cmd[8] = size;
+	if ((ret = cdrom_mode_select(cdi, &cgc))) {
+		pkt_dump_sense(cgc.sense);
+		return ret;
+	}
+
+	pkt_print_settings(pd);
+	return 0;
+}
+
+/*
+ * 0 -- we can write to this track, 1 -- we can't
+ */
+static int pkt_good_track(track_information *ti)
+{
+	/*
+	 * only good for CD-RW at the moment, not DVD-RW
+	 */
+
+	/*
+	 * FIXME: only fixed packets handled for now
+	 */
+	if (ti->fp == 0)
+		return 0;
+
+	/*
+	 * per Mt Fuji, a packet track is writable with any rt/blank
+	 * combination except both bits set at once
+	 */
+	if (ti->packet == 1 && !(ti->rt == 1 && ti->blank == 1))
+		return 0;
+
+	printk("pktcdvd: bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet);
+	return 1;
+}
+
+/*
+ * 0 -- we can write to this disc, 1 -- we can't
+ */
+static int pkt_good_disc(struct pktcdvd_device *pd, disc_information *di)
+{
+	switch (di->disc_type) {
+	case 0x00:
+	case 0x20:
+		/* writable disc types */
+		break;
+	case 0xff:
+		/*
+		 * for this type we should probably reserve a new track,
+		 * but that is better left to user space apps
+		 */
+		printk("pktcdvd: Unknown disc. No track?\n");
+		return 1;
+	default:
+		printk("pktcdvd: Wrong disc type (%x)\n", di->disc_type);
+		return 1;
+	}
+
+	if (di->erasable == 0) {
+		printk("pktcdvd: Disc not erasable\n");
+		return 1;
+	}
+
+	if (pd->track_status == PACKET_SESSION_RESERVED) {
+		printk("pktcdvd: Can't write to last track (reserved)\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+/*
+ * Probe the inserted disc and first track, filling in pd->settings
+ * (packet size, fixed/variable, block mode, track mode) and the
+ * next-writable / last-recorded addresses.  Returns 0 if the media is
+ * usable for packet writing, a negative errno or 1 otherwise.
+ */
+static int pkt_probe_settings(struct pktcdvd_device *pd)
+{
+	disc_information di;
+	track_information ti;
+	int ret, track;
+
+	memset(&di, 0, sizeof(disc_information));
+	memset(&ti, 0, sizeof(track_information));
+
+	if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
+		printk("failed get_disc\n");
+		return ret;
+	}
+
+	pd->disc_status = di.disc_status;
+	pd->track_status = di.border_status;
+
+	if (pkt_good_disc(pd, &di))
+		return -ENXIO;
+
+	printk("pktcdvd: inserted media is CD-R%s\n", di.erasable ? "W" : "");
+	pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR;
+
+	/* always probe track 1 for now, not the last track */
+	track = 1; /* (di.last_track_msb << 8) | di.last_track_lsb; */
+	if ((ret = cdrom_get_track_info(pd->dev, track, 1, &ti))) {
+		printk("pktcdvd: failed get_track\n");
+		return ret;
+	}
+
+	if (pkt_good_track(&ti)) {
+		printk("pktcdvd: can't write to this track\n");
+		return -ENXIO;
+	}
+
+	/*
+	 * we keep packet size in 512 byte units, makes it easier to
+	 * deal with request calculations.
+	 */
+	pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2;
+	if (pd->settings.size == 0) {
+		/* fall back to 64KB packets (128 * 512) on broken drives */
+		printk("pktcdvd: detected zero packet size!\n");
+		pd->settings.size = 128;
+	}
+	pd->settings.fp = ti.fp;
+	/* misalignment of the track start within a packet zone */
+	pd->offset = (be32_to_cpu(ti.track_start) << 2) % pd->settings.size;
+
+	if (ti.nwa_v) {
+		pd->nwa = be32_to_cpu(ti.next_writable);
+		set_bit(PACKET_NWA_VALID, &pd->flags);
+	}
+
+	/*
+	 * in theory we could use lra on -RW media as well and just zero
+	 * blocks that haven't been written yet, but in practice that
+	 * is just a no-go. we'll use that for -R, naturally.
+	 */
+	if (ti.lra_v) {
+		pd->lra = be32_to_cpu(ti.last_rec_address);
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	} else {
+		pd->lra = 0xffffffff;
+		set_bit(PACKET_LRA_VALID, &pd->flags);
+	}
+
+	/*
+	 * fine for now
+	 */
+	pd->settings.link_loss = 7;
+	pd->settings.write_type = 0;	/* packet */
+	pd->settings.track_mode = ti.track_mode;
+
+	/*
+	 * mode1 or mode2 disc
+	 */
+	switch (ti.data_mode) {
+	case PACKET_MODE1:
+		pd->settings.block_mode = PACKET_BLOCK_MODE1;
+		break;
+	case PACKET_MODE2:
+		pd->settings.block_mode = PACKET_BLOCK_MODE2;
+		break;
+	default:
+		printk("pktcdvd: unknown data mode\n");
+		return 1;
+	}
+	return 0;
+}
+
+/*
+ * enable/disable the drive's write cache via the caching mode page
+ */
+static int pkt_write_caching(struct pktcdvd_device *pd, int set)
+{
+	struct cdrom_generic_command cgc;
+	unsigned char page[64];
+	int ret;
+
+	memset(page, 0, sizeof(page));
+	init_cdrom_command(&cgc, page, sizeof(page), CGC_DATA_READ);
+	cgc.buflen = pd->mode_offset + 12;
+
+	ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_WCACHING_PAGE, 0);
+	if (ret)
+		return ret;
+
+	/* WCE is bit 2 of byte 10 within the page */
+	page[pd->mode_offset + 10] |= (!!set << 2);
+
+	cgc.buflen = cgc.cmd[8] = 2 + ((page[0] << 8) | (page[1] & 0xff));
+	ret = cdrom_mode_select(pd->cdi, &cgc);
+	if (ret == 0)
+		printk("pktcdvd: %sabled write caching on %s\n", set ? "en" : "dis", pd->name);
+	return ret;
+}
+
+/*
+ * ask the drive to commit its internal cache to the media
+ */
+static int pkt_flush_cache(struct pktcdvd_device *pd)
+{
+	struct cdrom_generic_command cgc;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.quiet = 1;
+	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+
+	/*
+	 * setting the IMMED bit would allow a much faster close, but we
+	 * default to a synchronous flush
+	 */
+#if 0
+	cgc.cmd[1] = 1 << 1;
+#endif
+	return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
+}
+
+/*
+ * Read the drive's current write speed from the capabilities mode page
+ * into pd->speed (stored as the x-rating, i.e. raw kb/s / 0xb0).
+ *
+ * If the full-buffer MODE SENSE fails, a second attempt is made with a
+ * buffer length computed from the page header.  NOTE(review): that
+ * length is taken from `buf` after the *failed* sense -- this assumes
+ * the drive still filled in the header bytes; verify on real hardware.
+ */
+static int pkt_get_speed(struct pktcdvd_device *pd)
+{
+	struct cdrom_generic_command cgc;
+	unsigned char buf[64];
+	int ret, offset;
+
+	memset(buf, 0, sizeof(buf));
+	init_cdrom_command(&cgc, buf, sizeof(buf), CGC_DATA_UNKNOWN);
+
+	ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+	if (ret) {
+		/* retry with the exact length the page header advertises */
+		cgc.buflen = pd->mode_offset + buf[pd->mode_offset + 9] + 2 +
+			sizeof(struct mode_page_header);
+		ret = cdrom_mode_sense(pd->cdi, &cgc, GPMODE_CAPABILITIES_PAGE, 0);
+		if (ret)
+			return ret;
+	}
+
+	/* current write speed lives at offset 26 of the caps page */
+	offset = pd->mode_offset + 26;
+	pd->speed = ((buf[offset] << 8) | buf[offset + 1]) / 0xb0;
+	return 0;
+}
+
+/*
+ * speed is given as the normal factor, e.g. 4 for 4x
+ */
+static int pkt_set_speed(struct pktcdvd_device *pd, unsigned speed)
+{
+	struct cdrom_generic_command cgc;
+	unsigned write_speed, read_speed;
+
+	/*
+	 * read spindle speed is set to 1.5x the write speed: reading much
+	 * faster than we write would just cause extra spin up/down while
+	 * gathering data (a 1/1 factor might be faster, needs testing).
+	 */
+	write_speed = speed * 0xb0;
+	read_speed = (write_speed * 3) >> 1;
+
+	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+	cgc.cmd[0] = 0xbb;	/* SET CD SPEED */
+	cgc.cmd[2] = (read_speed >> 8) & 0xff;
+	cgc.cmd[3] = read_speed & 0xff;
+	cgc.cmd[4] = (write_speed >> 8) & 0xff;
+	cgc.cmd[5] = write_speed & 0xff;
+
+	return pd->cdi->ops->generic_packet(pd->cdi, &cgc);
+}
+
+/*
+ * Give me full power, Captain
+ */
+static int pkt_max_speed(struct pktcdvd_device *pd)
+{
+	disc_information di;
+	int ret;
+
+	/*
+	 * FIXME: do a proper unified cap page. this also isn't strict
+	 * Mt Fuji, but essentially all drives support it -- far more than
+	 * support the GET_PERFORMANCE command (and we use the old set
+	 * speed command anyway, not the streaming feature).
+	 */
+	ret = pkt_set_speed(pd, 8);
+	if (ret)
+		return ret;
+
+	/*
+	 * touch the disc once -- the next capabilities read then reports
+	 * the maximum speed for the inserted media
+	 */
+	ret = cdrom_get_disc_info(pd->dev, &di);
+	if (ret)
+		return ret;
+
+	ret = pkt_get_speed(pd);
+	if (ret) {
+		printk("pktcdvd: failed get speed\n");
+		return ret;
+	}
+
+	DPRINTK("pktcdvd: speed (R/W) %u/%u\n", (pd->speed * 3) / 2, pd->speed);
+	return 0;
+}
+
+/* lock (non-zero) or unlock (zero) the drive door/tray */
+static int pkt_lock_tray(struct pktcdvd_device *pd, int lock)
+{
+	struct cdrom_device_info *cdi = pd->cdi;
+
+	return cdi->ops->lock_door(cdi, lock ? 1 : 0);
+}
+
+#if 0
+/*
+ * disabled: compute device capacity by summing the sizes of all tracks
+ */
+static int pkt_track_capacity(struct pktcdvd_device *pd)
+{
+	disc_information di;
+	track_information ti;
+	int l_track, i, ret;
+	unsigned long size = 0;
+
+	memset(&di, 0, sizeof(disc_information));
+	memset(&ti, 0, sizeof(track_information));
+
+	if ((ret = cdrom_get_disc_info(pd->dev, &di))) {
+		DPRINTK("failed get_disc\n");
+		return ret;
+	}
+
+	/* msb is the high byte: shift it up, don't shift it away */
+	l_track = (di.last_track_msb << 8) | di.last_track_lsb;
+	DPRINTK("pktcdvd: last track %d\n", l_track);
+	for (i = di.n_first_track; i <= l_track; i++) {
+		if ((ret = cdrom_get_track_info(pd->dev, i, 1, &ti))) {
+			DPRINTK("pktcdvd: failed get_track\n");
+			return ret;
+		}
+		size += be32_to_cpu(ti.track_size);
+	}
+	/* blocks are 2KB, pkt_sizes[] is in 512 byte sectors */
+	pkt_sizes[MINOR(pd->pkt_dev)] = size << 1;
+	return 0;
+}
+
+/*
+ * disabled: set device capacity from READ CDVD CAPACITY
+ */
+static int pkt_set_capacity(struct pktcdvd_device *pd)
+{
+	struct cdrom_generic_command cgc;
+	struct cdrom_device_info *cdi = pd->cdi;
+	struct cdvd_capacity cap;
+	int ret;
+
+	/* data direction argument was missing in the original */
+	init_cdrom_command(&cgc, &cap, sizeof(cap), CGC_DATA_READ);
+	cgc.cmd[0] = GPCMD_READ_CDVD_CAPACITY;
+	if ((ret = cdi->ops->generic_packet(cdi, &cgc)))
+		return ret;
+
+	/*
+	 * We should probably give up if read capacity fails, since then
+	 * the disc is not ready to be written to -- for now I use
+	 * raw devices and this is fine.
+	 */
+	pkt_sizes[MINOR(pd->pkt_dev)] = be32_to_cpu(cap.lba) << 1;
+	return 0;
+}
+#endif
+
+/*
+ * prepare the drive and media for writing: probe settings, send the
+ * write parameters page, enable write caching, set maximum speed.
+ * returns 0 on success, -EIO on any failure.
+ */
+static int pkt_open_write(struct pktcdvd_device *pd)
+{
+	if (pkt_probe_settings(pd)) {
+		DPRINTK("pktcdvd: %s failed probe\n", pd->name);
+		return -EIO;
+	}
+
+	if (pkt_set_write_settings(pd)) {
+		DPRINTK("pktcdvd: %s failed saving write settings\n", pd->name);
+		return -EIO;
+	}
+
+	/* best effort -- a drive without the caching page still works */
+	(void) pkt_write_caching(pd, USE_WCACHING);
+
+	if (pkt_max_speed(pd)) {
+		DPRINTK("pktcdvd: %s couldn't set write speed\n", pd->name);
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * called at open time. return 1 if the device can only be opened read-only.
+ */
+static int pkt_open_dev(struct pktcdvd_device *pd, int write)
+{
+	long lba;
+	int ret;
+
+	if (!pd->dev)
+		return 0;
+
+	ret = cdrom_get_last_written(pd->dev, &lba);
+	if (ret)
+		return ret;
+
+	/* lba is in 2KB blocks, pkt_sizes[] wants 512 byte sectors */
+	pkt_sizes[MINOR(pd->pkt_dev)] = lba << 1;
+
+	if (write) {
+		ret = pkt_open_write(pd);
+		if (ret)
+			return ret;
+		clear_bit(PACKET_READONLY, &pd->flags);
+		printk("pktcdvd: %luKB available on disc\n", lba << 1);
+	} else {
+		ret = pkt_max_speed(pd);
+		if (ret)
+			return ret;
+		set_bit(PACKET_READONLY, &pd->flags);
+	}
+
+	return 0;
+}
+
+/*
+ * called when the device is closed. flushes dirty buffers and has the
+ * drive commit its internal cache before we drop our reference.
+ */
+static void pkt_release_dev(struct pktcdvd_device *pd, int flush)
+{
+	fsync_dev(pd->pkt_dev);
+	invalidate_buffers(pd->pkt_dev);
+
+	if (flush && pkt_flush_cache(pd))
+		DPRINTK("pktcdvd: %s not flushing cache\n", pd->name);
+
+	atomic_dec(&pd->refcnt);
+}
+
+/*
+ * open entry point.  The original jumped to the wrong cleanup labels:
+ * the -ENXIO path (taken *before* the refcount is raised) went through
+ * atomic_dec, while the -EBUSY/-EIO paths (taken *after* atomic_inc)
+ * skipped it and leaked a reference.  Fixed below.
+ */
+static int pkt_open(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = NULL;
+	int ret = 0;
+
+	VPRINTK("pktcdvd: entering open\n");
+
+	MOD_INC_USE_COUNT;
+
+	/*
+	 * should this really be necessary??
+	 */
+	if (!inode) {
+		MOD_DEC_USE_COUNT;
+		return -EINVAL;
+	}
+
+	if (MINOR(inode->i_rdev) >= MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		MOD_DEC_USE_COUNT;
+		return -ENODEV;
+	}
+
+	/*
+	 * either device is not configured, or pktsetup is old and doesn't
+	 * use O_CREAT to create device
+	 */
+	pd = &pkt_devs[MINOR(inode->i_rdev)];
+	if (!pd->dev && !(file->f_flags & O_CREAT)) {
+		ret = -ENXIO;
+		goto out;	/* refcount not taken yet */
+	}
+
+	ret = -EBUSY;
+	atomic_inc(&pd->refcnt);
+	if ((atomic_read(&pd->refcnt) > 1) && (file->f_mode & FMODE_WRITE))
+		goto out_dec;
+
+	ret = -EIO;
+	if (pkt_open_dev(pd, file->f_mode & FMODE_WRITE))
+		goto out_dec;
+
+	/*
+	 * needed here as well, since ext2 (among others) may change
+	 * the blocksize at mount time
+	 */
+	set_blocksize(pd->pkt_dev, CD_FRAMESIZE);
+	return 0;
+out_dec:
+	atomic_dec(&pd->refcnt);
+out:
+	VPRINTK("pktcdvd: failed open\n");
+	MOD_DEC_USE_COUNT;
+	return ret;
+}
+
+/* release entry point: flush and drop the reference taken at open */
+static int pkt_close(struct inode *inode, struct file *file)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+
+	if (pd->dev)
+		pkt_release_dev(pd, 1);
+
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/*
+ * pktcdvd i/o elevator
+ *
+ * rules: always merge whenever possible, and support hole merges
+ * (the two VPRINTK calls below were split by mail wrapping in the
+ * posted patch; rejoined here.)
+ */
+static int pkt_elevator_merge(request_queue_t *q, struct request **req,
+			      struct list_head *head,
+			      struct buffer_head *bh, int rw,
+			      int max_sectors)
+{
+	struct list_head *entry = &q->queue_head;
+	unsigned int count = bh->b_size >> 9, ret = ELEVATOR_NO_MERGE;
+	struct pktcdvd_device *pd = pkt_find_dev(bh->b_rdev);
+
+	VPRINTK("pkt_elevator_merge: rw=%d, ms=%d, bh=%ld (%lu), dev=%d, count=%d\n", rw, max_sectors, bh->b_blocknr, bh->b_rsector, bh->b_rdev, count);
+
+	while ((entry = entry->prev) != head) {
+		struct request *__rq = blkdev_entry_to_request(entry);
+		VPRINTK("cmd=%d, bh=%ld (%lu/%lu), dev=%d, sem=%p, seq=%d, req=%p, ret=%d\n",
+			__rq->cmd, __rq->bh->b_blocknr, __rq->sector, __rq->nr_sectors, __rq->rq_dev, __rq->sem, __rq->elevator_sequence, *req, ret);
+		/*
+		 * simply "aging" of requests in queue
+		 */
+		if (rw == READ && __rq->elevator_sequence-- <= 0)
+			break;
+
+		if (__rq->sem)
+			continue;
+		if (__rq->rq_dev != bh->b_rdev)
+			continue;
+		if (!*req && bh_rq_in_between(bh, __rq, &q->queue_head))
+			*req = __rq;
+		if (__rq->cmd != rw)
+			continue;
+		if (__rq->nr_sectors + count > max_sectors)
+			continue;
+		if (rw == READ && __rq->elevator_sequence < count)
+			continue;
+		/* never merge writes across a packet zone boundary */
+		if (rw == WRITE && ZONE(__rq->sector, pd) != ZONE(bh->b_rsector, pd))
+			continue;
+		if (__rq->sector + __rq->nr_sectors == bh->b_rsector) {
+			ret = ELEVATOR_BACK_MERGE;
+			*req = __rq;
+			break;
+		} else if (__rq->sector - count == bh->b_rsector) {
+			ret = ELEVATOR_FRONT_MERGE;
+			__rq->elevator_sequence -= count;
+			*req = __rq;
+			break;
+		}
+	}
+	VPRINTK("*req=%p, ret=%d\n", *req, ret);
+
+	return ret;
+}
+
+/*
+ * set up our private request queue: our own request function and
+ * elevator, with the packet-aware merge functions hooked in
+ */
+static void pkt_init_queue(struct pktcdvd_device *pd)
+{
+	request_queue_t *q = &pd->cdrw.r_queue;
+
+	blk_init_queue(q, pkt_request);
+	blk_queue_headactive(q, 0);
+	elevator_init(&q->elevator, ELEVATOR_PKTCDVD);
+	q->queuedata = pd;
+	q->front_merge_fn = pkt_front_merge_fn;
+	q->back_merge_fn = pkt_back_merge_fn;
+	q->merge_requests_fn = pkt_merge_requests_fn;
+}
+
+/*
+ * Bind a CD-ROM device to this writer slot: allocate buffers, set up
+ * the private queue and start the per-writer kernel thread.
+ * (The signature was split by mail wrapping in the posted patch;
+ * rejoined here.  Also drops the module use count again on the
+ * -ENOMEM path, which previously leaked it.)
+ */
+static int pkt_new_dev(struct pktcdvd_device *pd, kdev_t dev, struct block_device *bdev)
+{
+	struct cdrom_device_info *cdi;
+	request_queue_t *q;
+	int i;
+
+	for (i = 0; i < MAX_WRITERS; i++) {
+		if (pkt_devs[i].dev == dev) {
+			printk("pktcdvd: %s already setup\n", kdevname(dev));
+			return -EBUSY;
+		}
+	}
+
+	for (i = 0; i < MAX_WRITERS; i++)
+		if (pd == &pkt_devs[i])
+			break;
+
+	if (i == MAX_WRITERS) {
+		printk("pktcdvd: max %d writers supported\n", MAX_WRITERS);
+		return -ENXIO;
+	}
+
+	cdi = cdrom_find_device(dev);
+	if (cdi == NULL) {
+		printk("pktcdvd: %s is not a CD-ROM\n", kdevname(dev));
+		return -ENXIO;
+	}
+
+	MOD_INC_USE_COUNT;
+
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	atomic_set(&pd->cdrw.pending_bh, 0);
+	atomic_set(&pd->cdrw.free_bh, 0);
+	spin_lock_init(&pd->lock);
+	if (pkt_grow_bhlist(pd, PACKET_MAX_SIZE) < PACKET_MAX_SIZE) {
+		printk("pktcdvd: not enough memory for buffers\n");
+		MOD_DEC_USE_COUNT;	/* undo the count taken above */
+		return -ENOMEM;
+	}
+	set_blocksize(dev, CD_FRAMESIZE);
+	pd->cdi = cdi;
+	pd->dev = dev;
+	pd->bdev = bdev;
+	pd->pkt_dev = MKDEV(PACKET_MAJOR, i);
+	sprintf(pd->name, "pktcdvd%d", i);
+	atomic_set(&pd->refcnt, 0);
+	init_waitqueue_head(&pd->wqueue);
+	init_waitqueue_head(&pd->lock_wait);
+
+	/*
+	 * store device merge functions (SCSI uses their own to build
+	 * scatter-gather tables)
+	 */
+	q = blk_get_queue(dev);
+	spin_lock_irq(&io_request_lock);
+	pkt_init_queue(pd);
+	pd->cdrw.front_merge_fn = q->front_merge_fn;
+	pd->cdrw.back_merge_fn = q->back_merge_fn;
+	pd->cdrw.merge_requests_fn = q->merge_requests_fn;
+	pd->cdrw.queuedata = q->queuedata;
+	spin_unlock_irq(&io_request_lock);
+
+	pd->cdrw.pid = kernel_thread(kcdrwd, pd, CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
+
+	DPRINTK("pktcdvd: writer %s successfully registered\n", cdi->name);
+	return 0;
+}
+
+/*
+ * arg contains file descriptor of CD-ROM device.
+ *
+ * Note: the original leaked the blkdev_get() reference on the -EROFS
+ * and pkt_new_dev() failure paths; those now go through out_put.
+ */
+static int pkt_setup_dev(struct pktcdvd_device *pd, unsigned int arg)
+{
+	struct inode *inode;
+	struct file *file;
+	int ret;
+
+	if ((file = fget(arg)) == NULL) {
+		printk("pktcdvd: bad file descriptor passed\n");
+		return -EBADF;
+	}
+
+	ret = -EINVAL;
+	if ((inode = file->f_dentry->d_inode) == NULL) {
+		printk("pktcdvd: huh? file descriptor contains no inode?\n");
+		goto out;
+	}
+	ret = -ENOTBLK;
+	if (!S_ISBLK(inode->i_mode)) {
+		printk("pktcdvd: device is not a block device (duh)\n");
+		goto out;
+	}
+	ret = blkdev_get(inode->i_bdev, file->f_mode, file->f_flags, BDEV_FILE);
+	if (ret)
+		goto out;
+	ret = -EROFS;
+	if (IS_RDONLY(inode)) {
+		printk("pktcdvd: Can't write to read-only dev\n");
+		goto out_put;
+	}
+	if ((ret = pkt_new_dev(pd, inode->i_rdev, inode->i_bdev))) {
+		printk("pktcdvd: all booked up\n");
+		goto out_put;
+	}
+
+	/* keep the dentry pinned for the lifetime of the writer */
+	pd->pkt_dentry = dget(file->f_dentry);
+	atomic_inc(&pd->refcnt);
+
+	if ((ret = pkt_lock_tray(pd, 1)))
+		printk("pktcdvd: can't lock drive tray\n");
+
+	goto out;
+
+out_put:
+	blkdev_put(inode->i_bdev, BDEV_FILE);
+out:
+	fput(file);
+	return ret;
+}
+
+/*
+ * Tear down a writer: release the CD-ROM block device, unlock the tray,
+ * free the buffer pool, stop the kernel thread and wipe the slot.
+ * The order matters: the device must be released (flushing buffers)
+ * before the thread is killed and the queue destroyed.
+ */
+static int pkt_remove_dev(struct pktcdvd_device *pd)
+{
+	int ret;
+
+	/*
+	 * will also invalidate buffers for CD-ROM
+	 */
+	blkdev_put(pd->pkt_dentry->d_inode->i_bdev, BDEV_FILE);
+	dput(pd->pkt_dentry);
+	invalidate_buffers(pd->pkt_dev);
+
+	/*
+	 * Unlock CD-ROM device
+	 */
+	(void) pkt_lock_tray(pd, 0);
+
+	if ((ret = pkt_shrink_bhlist(pd, PACKET_MAX_SIZE)) != PACKET_MAX_SIZE)
+		printk("pktcdvd: leaked %d buffers\n", PACKET_MAX_SIZE - ret);
+
+	/* SIGKILL is the only signal kcdrwd has unblocked */
+	if ((ret = kill_proc(pd->cdrw.pid, SIGKILL, 1)) == 0) {
+		int count = 10;
+		/* poll up to ~1s for the thread to clear PACKET_THREAD */
+		while (test_bit(PACKET_THREAD, &pd->flags) && --count) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ / 10);
+		}
+		if (!count)
+			printk("pkt_exit: can't kill kernel thread\n");
+	}
+
+	blk_cleanup_queue(&pd->cdrw.r_queue);
+	DPRINTK("pktcdvd: writer %s unregistered\n", pd->cdi->name);
+	/* wipe the slot so it can be reused by a later PACKET_SETUP_DEV */
+	memset(pd, 0, sizeof(struct pktcdvd_device));
+	MOD_DEC_USE_COUNT;
+	return 0;
+}
+
+/*
+ * media-change check, forwarded to the underlying CD-ROM driver.
+ * The original dereferenced pd->cdi *before* the pd == NULL check
+ * (potential NULL pointer dereference); the unused local is gone.
+ */
+static int pkt_media_change(kdev_t dev)
+{
+	struct pktcdvd_device *pd = pkt_find_dev(dev);
+
+	if (pd == NULL)
+		return 0;
+
+	return cdrom_fops.check_media_change(pd->dev);
+}
+
+/*
+ * ioctl entry point.  Fixes over the posted version:
+ *  - PACKET_GET_STATS copied to &arg (the kernel stack copy of the
+ *    argument) instead of the user pointer, and fell through into
+ *    PACKET_SETUP_DEV;
+ *  - PACKET_WAKEUP fell through into BLKGETSIZE.
+ */
+static int pkt_ioctl(struct inode *inode, struct file *file,
+		     unsigned int cmd, unsigned long arg)
+{
+	struct pktcdvd_device *pd = &pkt_devs[MINOR(inode->i_rdev)];
+
+	if ((cmd != PACKET_SETUP_DEV) && !pd->dev) {
+		DPRINTK("pktcdvd: dev not setup\n");
+		return -ENXIO;
+	}
+
+	switch (cmd) {
+	case PACKET_GET_STATS:
+		if (copy_to_user((void *) arg, &pd->stats, sizeof(struct packet_stats)))
+			return -EFAULT;
+		return 0;
+
+	case PACKET_SETUP_DEV:
+		if (pd->dev) {
+			printk("pktcdvd: dev already setup\n");
+			return -EBUSY;
+		}
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		return pkt_setup_dev(pd, arg);
+
+	case PACKET_TEARDOWN_DEV:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if (atomic_read(&pd->refcnt) != 1)
+			return -EBUSY;
+		return pkt_remove_dev(pd);
+
+	case PACKET_WAKEUP:
+		if (!capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		wake_up(&pd->wqueue);
+		return 0;
+
+	case BLKGETSIZE:
+		return put_user(blk_size[PACKET_MAJOR][MINOR(inode->i_rdev)] << 1, (long *)arg);
+
+	case BLKROSET:
+	case BLKROGET:
+	case BLKSSZGET:
+	case BLKRASET:
+	case BLKRAGET:
+	case BLKFLSBUF:
+		return blk_ioctl(inode->i_rdev, cmd, arg);
+
+	/*
+	 * forward selected CDROM ioctls to CD-ROM, for UDF
+	 */
+	case CDROMMULTISESSION:
+	case CDROMREADTOCENTRY:
+	case CDROM_LAST_WRITTEN:
+	case CDROM_SEND_PACKET:
+	case SCSI_IOCTL_SEND_COMMAND:
+		return ioctl_by_bdev(pd->bdev, cmd, arg);
+
+	default:
+		printk("pktcdvd: Unknown ioctl for %s (%x)\n", pd->name, cmd);
+		return -ENOTTY;
+	}
+
+	return 0;
+}
+
+/* block device method table for /dev/pktcdvd* */
+static struct block_device_operations pktcdvd_ops = {
+	check_media_change:	pkt_media_change,
+	open:			pkt_open,
+	release:		pkt_close,
+	ioctl:			pkt_ioctl,
+};
+
+/* count the entries on a list under the given lock (pd is unused) */
+static int list_nr_items(struct pktcdvd_device *pd, struct list_head *head,
+			 spinlock_t *lock)
+{
+	struct list_head *pos;
+	int nr = 0;
+
+	spin_lock_irq(lock);
+	list_for_each(pos, head)
+		nr++;
+	spin_unlock_irq(lock);
+
+	return nr;
+}
+
+/*
+ * format one writer's settings, statistics and queue state into buf;
+ * returns the number of characters written.  (The last sprintf was
+ * split by mail wrapping in the posted patch; rejoined here.)
+ */
+static int pkt_proc_device(struct pktcdvd_device *pd, char *buf)
+{
+	char *b = buf, *msg;
+
+	b += sprintf(b, "\nWriter %s (%s):\n", pd->name, kdevname(pd->dev));
+
+	b += sprintf(b, "\nSettings:\n");
+	b += sprintf(b, "\tpacket size:\t\t%dKB\n", pd->settings.size / 2);
+
+	if (pd->settings.write_type == 0)
+		msg = "Packet";
+	else
+		msg = "Unknown";
+	b += sprintf(b, "\twrite type:\t\t%s\n", msg);
+
+	b += sprintf(b, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable");
+	b += sprintf(b, "\tlink loss:\t\t%d\n", pd->settings.link_loss);
+
+	b += sprintf(b, "\ttrack mode:\t\t%d\n", pd->settings.track_mode);
+
+	if (pd->settings.block_mode == PACKET_BLOCK_MODE1)
+		msg = "Mode 1";
+	else if (pd->settings.block_mode == PACKET_BLOCK_MODE2)
+		msg = "Mode 2";
+	else
+		msg = "Unknown";
+	b += sprintf(b, "\tblock mode:\t\t%s\n", msg);
+
+	b += sprintf(b, "\nStatistics:\n");
+	b += sprintf(b, "\tbuffers started:\t%lu\n", pd->stats.bh_s);
+	b += sprintf(b, "\tbuffers ended:\t\t%lu\n", pd->stats.bh_e);
+	b += sprintf(b, "\tsectors written:\t%lu\n", pd->stats.secs_w);
+	b += sprintf(b, "\tsectors read:\t\t%lu\n", pd->stats.secs_r);
+
+	b += sprintf(b, "\nMisc:\n");
+	b += sprintf(b, "\treference count:\t%d\n", atomic_read(&pd->refcnt));
+	b += sprintf(b, "\tflags:\t\t\t0x%lx\n", pd->flags);
+	b += sprintf(b, "\twrite speed:\t\t%uKB/sec\n", pd->speed * 150);
+	b += sprintf(b, "\tstart offset:\t\t%lu\n", pd->offset);
+	b += sprintf(b, "\tmode page offset:\t%u\n", pd->mode_offset);
+
+	b += sprintf(b, "\nQueue state:\n");
+	b += sprintf(b, "\tfree buffers:\t\t%u\n", atomic_read(&pd->cdrw.free_bh));
+	b += sprintf(b, "\tpending buffers:\t%u\n", atomic_read(&pd->cdrw.pending_bh));
+
+	b += sprintf(b, "\trequest active:\t\t%s\n", pd->rq ? "yes" : "no");
+	b += sprintf(b, "\tqueue requests:\t\t%u\n", list_nr_items(pd, &pd->cdrw.r_queue.queue_head, &io_request_lock));
+
+	return b - buf;
+}
+
+/*
+ * /proc read handler.  The original advanced buf by the *cumulative*
+ * length after each writer (buf += len after len += ...), corrupting
+ * the output as soon as more than one writer was configured; buf now
+ * advances by each writer's own contribution.
+ */
+static int pkt_read_proc(char *page, char **start, off_t off, int count,
+			 int *eof, void *data)
+{
+	struct pktcdvd_device *pd;
+	char *buf = page;
+	int len, i;
+
+	len = sprintf(buf, "%s\n", VERSION_CODE);
+	buf += len;
+
+	for (i = 0; i < MAX_WRITERS; i++) {
+		pd = &pkt_devs[i];
+		if (pd->dev) {
+			int l = pkt_proc_device(pd, buf);
+			len += l;
+			buf += l;
+		}
+	}
+
+	if (len <= off + count)
+		*eof = 1;
+
+	*start = page + off;
+	len -= off;
+	if (len > count)
+		len = count;
+	if (len < 0)
+		len = 0;
+
+	return len;
+}
+
+/*
+ * Module init: register the block device (major PACKET_MAJOR), allocate
+ * the per-writer size/blocksize/readahead tables and the writer array,
+ * and create /proc/driver/pktcdvd.
+ *
+ * NOTE(review): the devfs entry is registered before
+ * devfs_register_blkdev() and is not unregistered if that call fails --
+ * a potential leak on the error path; verify against the devfs API.
+ */
+int __init pkt_init(void)
+{
+	devfs_register(NULL, "pktcdvd", 0, DEVFS_FL_DEFAULT, PACKET_MAJOR,
+			S_IFBLK | S_IRUSR | S_IWUSR, &pktcdvd_ops, NULL);
+	if (devfs_register_blkdev(PACKET_MAJOR, "pktcdvd", &pktcdvd_ops)) {
+		printk("unable to register pktcdvd device\n");
+		return -EIO;
+	}
+
+	/* per-minor tables; globals start out NULL so kfree on err is safe */
+	pkt_sizes = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_sizes == NULL)
+		goto err;
+
+	pkt_blksize = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_blksize == NULL)
+		goto err;
+
+	pkt_readahead = kmalloc(MAX_WRITERS * sizeof(int), GFP_KERNEL);
+	if (pkt_readahead == NULL)
+		goto err;
+
+	pkt_devs = kmalloc(MAX_WRITERS * sizeof(struct pktcdvd_device), GFP_KERNEL);
+	if (pkt_devs == NULL)
+		goto err;
+
+	memset(pkt_devs, 0, MAX_WRITERS * sizeof(struct pktcdvd_device));
+	memset(pkt_sizes, 0, MAX_WRITERS * sizeof(int));
+	memset(pkt_blksize, 0, MAX_WRITERS * sizeof(int));
+	memset(pkt_readahead, 0, MAX_WRITERS * sizeof(int));
+
+	/* hook the tables into the global block layer arrays */
+	blk_size[PACKET_MAJOR] = pkt_sizes;
+	blksize_size[PACKET_MAJOR] = pkt_blksize;
+	max_readahead[PACKET_MAJOR] = pkt_readahead;
+	read_ahead[PACKET_MAJOR] = 128;
+	set_blocksize(MKDEV(PACKET_MAJOR, 0), CD_FRAMESIZE);
+
+	blk_dev[PACKET_MAJOR].queue = pkt_return_queue;
+
+	create_proc_read_entry("driver/pktcdvd", 0, 0, pkt_read_proc, NULL);
+
+	DPRINTK("pktcdvd: %s\n", VERSION_CODE);
+	return 0;
+
+err:
+	printk("pktcdvd: out of memory\n");
+	devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
+					   DEVFS_SPECIAL_BLK, 0));
+	devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
+	kfree(pkt_devs);
+	kfree(pkt_sizes);
+	kfree(pkt_blksize);
+	kfree(pkt_readahead);
+	return -ENOMEM;
+}
+
+/* module exit: remove the proc entry, unregister the device, free tables */
+void __exit pkt_exit(void)
+{
+	remove_proc_entry("driver/pktcdvd", NULL);
+
+	devfs_unregister(devfs_find_handle(NULL, "pktcdvd", 0, 0,
+					   DEVFS_SPECIAL_BLK, 0));
+	devfs_unregister_blkdev(PACKET_MAJOR, "pktcdvd");
+
+	kfree(pkt_devs);
+	kfree(pkt_sizes);
+	kfree(pkt_blksize);
+	kfree(pkt_readahead);
+}
+
+/* module metadata -- this predates MODULE_LICENSE, so none is declared */
+MODULE_DESCRIPTION("Packet writing layer for CD/DVD drives");
+MODULE_AUTHOR("Jens Axboe <axboe@suse.de>");
+
+module_init(pkt_init);
+module_exit(pkt_exit);
diff -ruN linux/drivers/ide/ide-cd.c linux-packet/drivers/ide/ide-cd.c
--- linux/drivers/ide/ide-cd.c Fri Feb 9 11:30:23 2001
+++ linux-packet/drivers/ide/ide-cd.c Tue Mar 13 11:33:06 2001
@@ -292,9 +292,11 @@
* correctly reporting tray status -- from
* Michael D Johnson <johnsom@orst.edu>
*
+ * 4.99 - Added write support for packet writing.
+ *
*************************************************************************/
-#define IDECD_VERSION "4.59"
+#define IDECD_VERSION "4.99"
#include <linux/config.h>
#include <linux/module.h>
@@ -549,7 +551,7 @@
(struct packet_command *) pc->sense,
(struct request_sense *) (pc->buffer - pc->c[4]));
}
- if (rq->cmd == READ || rq->cmd == WRITE)
+ if (rq->cmd == READ || rq->cmd == WRITE || rq->cmd == WRITE_PACKET)
if (!rq->current_nr_sectors)
uptodate = 1;
@@ -1608,6 +1610,7 @@
struct cdrom_info *info = drive->driver_data;
switch (rq->cmd) {
+ case WRITE_PACKET:
case WRITE:
case READ: {
if (CDROM_CONFIG_FLAGS(drive)->seeking) {
@@ -2409,6 +2412,12 @@
static
void ide_cdrom_release_real (struct cdrom_device_info *cdi)
{
+ struct cdrom_generic_command cgc;
+
+ init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
+ cgc.cmd[0] = GPCMD_FLUSH_CACHE;
+ cgc.quiet = 1;
+ (void) ide_cdrom_packet(cdi, &cgc);
}
@@ -2588,15 +2597,10 @@
printk(" %dX", CDROM_CONFIG_FLAGS(drive)->max_speed);
printk(" %s", CDROM_CONFIG_FLAGS(drive)->dvd ? "DVD-ROM" : "CD-ROM");
- if (CDROM_CONFIG_FLAGS (drive)->dvd_r|CDROM_CONFIG_FLAGS (drive)->dvd_ram)
- printk (" DVD%s%s",
- (CDROM_CONFIG_FLAGS (drive)->dvd_r)? "-R" : "",
- (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "-RAM" : "");
-
- if (CDROM_CONFIG_FLAGS (drive)->cd_r|CDROM_CONFIG_FLAGS (drive)->cd_rw)
- printk (" CD%s%s",
- (CDROM_CONFIG_FLAGS (drive)->cd_r)? "-R" : "",
- (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
+ if (CDROM_CONFIG_FLAGS(drive)->dvd_r || CDROM_CONFIG_FLAGS(drive)->dvd_ram)
+ printk (" DVD-R%s", (CDROM_CONFIG_FLAGS (drive)->dvd_ram)? "AM" : "");
+ if (CDROM_CONFIG_FLAGS(drive)->cd_r ||CDROM_CONFIG_FLAGS(drive)->cd_rw)
+ printk (" CD-R%s", (CDROM_CONFIG_FLAGS (drive)->cd_rw)? "/RW" : "");
if (CDROM_CONFIG_FLAGS (drive)->is_changer)
printk (" changer w/%d slots", nslots);
@@ -2619,7 +2623,7 @@
int major = HWIF(drive)->major;
int minor = drive->select.b.unit << PARTN_BITS;
- ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRAGET, BLKRASET, \
TYPE_INT, 0, 255, 1, 2, &read_ahead[major], NULL); \
+ ide_add_setting(drive, "breada_readahead", SETTING_RW, BLKRA
[prev in list] [next in list] [prev in thread] [next in thread]
Configure |
About |
News |
Add a list |
Sponsored by KoreLogic