static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
                           int len, char *buf) { … }

static int mtdblock_open(struct inode *inode, struct file *file) { … }
static release_t mtdblock_release(struct inode *inode, struct file *file)
{
        int dev;
        struct mtdblk_dev *mtdblk;

        DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n");

        if (inode == NULL)
                release_return(-ENODEV);

        dev = minor(inode->i_rdev);
        mtdblk = mtdblks[dev];

        down(&mtdblk->cache_sem);
        write_cached_data(mtdblk);
        up(&mtdblk->cache_sem);

        spin_lock(&mtdblks_lock);
        if (!--mtdblk->count) {
                /* It was the last usage. Free the device */
                mtdblks[dev] = NULL;
                spin_unlock(&mtdblks_lock);
                if (mtdblk->mtd->sync)
                        mtdblk->mtd->sync(mtdblk->mtd);
                put_mtd_device(mtdblk->mtd);
                vfree(mtdblk->cache_data);
                kfree(mtdblk);
        } else {
                spin_unlock(&mtdblks_lock);
        }

        DEBUG(MTD_DEBUG_LEVEL1, "ok\n");

        BLK_DEC_USE_COUNT;
        release_return(0);
}
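Before dropping its reference, mtdblock_release() takes the cache semaphore and calls write_cached_data() to flush the driver's write-back cache. That helper is not shown in this excerpt; the sketch below illustrates the flush pattern it has to follow, assuming struct mtdblk_dev carries cache_state, cache_offset, cache_size and cache_data fields and that an erase_write() helper erases one block and rewrites it from the cached copy. Treat it as an illustration, not the driver's actual code.

static int write_cached_data(struct mtdblk_dev *mtdblk)
{
        struct mtd_info *mtd = mtdblk->mtd;
        int ret;

        /* Nothing to do unless the cached erase block was modified. */
        if (mtdblk->cache_state != STATE_DIRTY)
                return 0;

        /* Erase the block on flash and write the cached copy back. */
        ret = erase_write(mtd, mtdblk->cache_offset,
                          mtdblk->cache_size, mtdblk->cache_data);
        if (ret)
                return ret;

        /*
         * Mark the buffer empty rather than clean, so the next access
         * re-reads the block from the device.
         */
        mtdblk->cache_state = STATE_EMPTY;
        return 0;
}

Flushing under cache_sem, as the release routine does, keeps the flush from racing with a concurrent cached read or write on the same device.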
/*
 * This is a special request_fn because it is executed in a process context
 * to be able to sleep independently of the caller. The io_request_lock
 * (for <2.5) or queue_lock (for >=2.5) is held upon entry and exit. The
 * head of our request queue is considered active so there is no need to
 * dequeue requests before we are done.
 */
static void handle_mtdblock_request(void)
{
        struct request *req;
        struct mtdblk_dev *mtdblk;
        unsigned int res;

        for (;;) {
                INIT_REQUEST;
                req = CURRENT;
                spin_unlock_irq(QUEUE_LOCK(QUEUE));

                mtdblk = mtdblks[minor(req->rq_dev)];
                res = 0;