Diffstat (limited to 'drivers')
93 files changed, 5322 insertions, 2745 deletions
diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
index d91a3a0b2325..deb4a456cf83 100644
--- a/drivers/base/devtmpfs.c
+++ b/drivers/base/devtmpfs.c
@@ -156,9 +156,7 @@ static int dev_mkdir(const char *name, umode_t mode)
 	if (!err)
 		/* mark as kernel-created inode */
 		dentry->d_inode->i_private = &thread;
-	dput(dentry);
-	mutex_unlock(&path.dentry->d_inode->i_mutex);
-	path_put(&path);
+	done_path_create(&path, dentry);
 	return err;
 }
@@ -218,10 +216,7 @@ static int handle_create(const char *nodename, umode_t mode, struct device *dev)
 		/* mark as kernel-created inode */
 		dentry->d_inode->i_private = &thread;
 	}
-	dput(dentry);
-
-	mutex_unlock(&path.dentry->d_inode->i_mutex);
-	path_put(&path);
+	done_path_create(&path, dentry);
 	return err;
 }
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index e54e31b02b88..3fbef018ce55 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -411,7 +411,7 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 		+ mdev->ldev->md.al_offset + mdev->al_tr_pos;
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE))
-		drbd_chk_io_error(mdev, 1, true);
+		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 	if (++mdev->al_tr_pos > div_ceil(mdev->act_log->nr_elements, AL_EXTENTS_PT))
@@ -876,7 +876,11 @@ int __drbd_set_out_of_sync(struct drbd_conf *mdev, sector_t sector, int size,
 	unsigned int enr, count = 0;
 	struct lc_element *e;
-	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
+	/* this should be an empty REQ_FLUSH */
+	if (size == 0)
+		return 0;
+
+	if (size < 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
 		dev_err(DEV, "sector: %llus, size: %d\n",
 			(unsigned long long)sector, size);
 		return 0;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index fcb956bb4b4c..ba91b408abad 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1096,7 +1096,7 @@ static int bm_rw(struct drbd_conf *mdev, int rw, unsigned flags, unsigned lazy_w
 	if (ctx->error) {
 		dev_alert(DEV, "we had at least one MD IO ERROR during bitmap IO\n");
-		drbd_chk_io_error(mdev, 1, true);
+		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 		err = -EIO; /* ctx->error ? */
 	}
@@ -1212,7 +1212,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
 	wait_until_done_or_disk_failure(mdev, mdev->ldev, &ctx->done);
 	if (ctx->error)
-		drbd_chk_io_error(mdev, 1, true);
+		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 	/* that should force detach, so the in memory bitmap will be
 	 * gone in a moment as well. */
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
index 02f013a073a7..b2ca143d0053 100644
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -813,7 +813,6 @@ enum {
 	SIGNAL_ASENDER,		/* whether asender wants to be interrupted */
 	SEND_PING,		/* whether asender should send a ping asap */
-	UNPLUG_QUEUED,		/* only relevant with kernel 2.4 */
 	UNPLUG_REMOTE,		/* sending a "UnplugRemote" could help */
 	MD_DIRTY,		/* current uuids and flags not yet on disk */
 	DISCARD_CONCURRENT,	/* Set on one node, cleared on the peer! */
@@ -824,7 +823,6 @@ enum {
 	CRASHED_PRIMARY,	/* This node was a crashed primary.
 				 * Gets cleared when the state.conn
 				 * goes into C_CONNECTED state. */
-	NO_BARRIER_SUPP,	/* underlying block device doesn't implement barriers */
 	CONSIDER_RESYNC,
 	MD_NO_FUA,		/* Users wants us to not use FUA/FLUSH on meta data dev */
@@ -834,6 +832,7 @@ enum {
 	BITMAP_IO_QUEUED,	/* Started bitmap IO */
 	GO_DISKLESS,		/* Disk is being detached, on io-error or admin request. */
 	WAS_IO_ERROR,		/* Local disk failed returned IO error */
+	FORCE_DETACH,		/* Force-detach from local disk, aborting any pending local IO */
 	RESYNC_AFTER_NEG,	/* Resync after online grow after the attach&negotiate finished. */
 	NET_CONGESTED,		/* The data socket is congested */
@@ -851,6 +850,13 @@ enum {
 	AL_SUSPENDED,		/* Activity logging is currently suspended. */
 	AHEAD_TO_SYNC_SOURCE,	/* Ahead -> SyncSource queued */
 	STATE_SENT,		/* Do not change state/UUIDs while this is set */
+
+	CALLBACK_PENDING,	/* Whether we have a call_usermodehelper(, UMH_WAIT_PROC)
+				 * pending, from drbd worker context.
+				 * If set, bdi_write_congested() returns true,
+				 * so shrink_page_list() would not recurse into,
+				 * and potentially deadlock on, this drbd worker.
+				 */
 };
 struct drbd_bitmap; /* opaque for drbd_conf */
@@ -1130,8 +1136,8 @@ struct drbd_conf {
 	int rs_in_flight;	/* resync sectors in flight (to proxy, in proxy and from proxy) */
 	int rs_planed;		/* resync sectors already planned */
 	atomic_t ap_in_flight;	/* App sectors in flight (waiting for ack) */
-	int peer_max_bio_size;
-	int local_max_bio_size;
+	unsigned int peer_max_bio_size;
+	unsigned int local_max_bio_size;
 };
 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1435,9 +1441,9 @@ struct bm_extent {
  * hash table. */
 #define HT_SHIFT 8
 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
-#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
+#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */
 /* Number of elements in the app_reads_hash */
 #define APP_R_HSIZE 15
@@ -1840,12 +1846,20 @@ static inline int drbd_request_state(struct drbd_conf *mdev,
 	return _drbd_request_state(mdev, mask, val, CS_VERBOSE + CS_ORDERED);
 }
+enum drbd_force_detach_flags {
+	DRBD_IO_ERROR,
+	DRBD_META_IO_ERROR,
+	DRBD_FORCE_DETACH,
+};
+
 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
-static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach, const char *where)
+static inline void __drbd_chk_io_error_(struct drbd_conf *mdev,
+	enum drbd_force_detach_flags forcedetach,
+	const char *where)
 {
 	switch (mdev->ldev->dc.on_io_error) {
 	case EP_PASS_ON:
-		if (!forcedetach) {
+		if (forcedetach == DRBD_IO_ERROR) {
 			if (__ratelimit(&drbd_ratelimit_state))
 				dev_err(DEV, "Local IO failed in %s.\n", where);
 			if (mdev->state.disk > D_INCONSISTENT)
@@ -1856,6 +1870,8 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
 	case EP_DETACH:
 	case EP_CALL_HELPER:
 		set_bit(WAS_IO_ERROR, &mdev->flags);
+		if (forcedetach == DRBD_FORCE_DETACH)
+			set_bit(FORCE_DETACH, &mdev->flags);
 		if (mdev->state.disk > D_FAILED) {
 			_drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
 			dev_err(DEV,
@@ -1875,7 +1891,7 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
  */
 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
 static inline void drbd_chk_io_error_(struct drbd_conf *mdev,
-	int error, int forcedetach, const char *where)
+	int error, enum drbd_force_detach_flags forcedetach, const char *where)
 {
 	if (error) {
 		unsigned long flags;
@@ -2405,15 +2421,17 @@ static inline void dec_ap_bio(struct drbd_conf *mdev)
 	int ap_bio = atomic_dec_return(&mdev->ap_bio_cnt);
 	D_ASSERT(ap_bio >= 0);
+
+	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
+		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
+			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
+	}
+
 	/* this currently does wake_up for every dec_ap_bio!
 	 * maybe rather introduce some type of hysteresis?
 	 * e.g. (ap_bio == mxb/2 || ap_bio == 0) ? */
 	if (ap_bio < mxb)
 		wake_up(&mdev->misc_wait);
-	if (ap_bio == 0 && test_bit(BITMAP_IO, &mdev->flags)) {
-		if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
-			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
-	}
 }
 static inline int drbd_set_ed_uuid(struct drbd_conf *mdev, u64 val)
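Note: the drbd_int.h hunk above turns the boolean forcedetach argument of __drbd_chk_io_error_() into the three-valued enum drbd_force_detach_flags, so the error path can tell an ordinary data-IO error apart from a meta-data IO error and from an explicit force-detach. A minimal userspace sketch of that pattern follows; handle_io_error() and the flag variable are invented for illustration and are not the DRBD API.

#include <stdbool.h>
#include <stdio.h>

/* Three distinct causes that a bare bool used to conflate. */
enum force_detach_flags {
	IO_ERROR,       /* ordinary data IO error: policy may pass it on  */
	META_IO_ERROR,  /* meta-data IO error: must always detach         */
	FORCE_DETACH,   /* admin request/timeout: detach, abort local IO  */
};

static bool abort_pending_io; /* models the FORCE_DETACH flag bit */

static void handle_io_error(enum force_detach_flags why)
{
	if (why == IO_ERROR) {        /* only this case may be passed on */
		printf("pass error to upper layers\n");
		return;
	}
	if (why == FORCE_DETACH)      /* remember to abort local requests */
		abort_pending_io = true;
	printf("detaching local disk (abort=%d)\n", abort_pending_io);
}

int main(void)
{
	handle_io_error(IO_ERROR);
	handle_io_error(META_IO_ERROR);
	handle_io_error(FORCE_DETACH);
	return 0;
}

With a plain bool, the META_IO_ERROR and FORCE_DETACH cases were indistinguishable; the enum lets only the genuine force-detach path set the abort flag.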
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 920ede2829d6..2e0e7fc1dbba 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1514,6 +1514,13 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 	/* Do not change the order of the if above and the two below... */
 	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) {	/* attach on the peer */
+		/* we probably will start a resync soon.
+		 * make sure those things are properly reset. */
+		mdev->rs_total = 0;
+		mdev->rs_failed = 0;
+		atomic_set(&mdev->rs_pending_cnt, 0);
+		drbd_rs_cancel_all(mdev);
+
 		drbd_send_uuids(mdev);
 		drbd_send_state(mdev, ns);
 	}
@@ -1630,9 +1637,24 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 		eh = mdev->ldev->dc.on_io_error;
 		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
-		/* Immediately allow completion of all application IO, that waits
-		   for completion from the local disk. */
-		tl_abort_disk_io(mdev);
+		if (was_io_error && eh == EP_CALL_HELPER)
+			drbd_khelper(mdev, "local-io-error");
+
+		/* Immediately allow completion of all application IO,
+		 * that waits for completion from the local disk,
+		 * if this was a force-detach due to disk_timeout
+		 * or administrator request (drbdsetup detach --force).
+		 * Do NOT abort otherwise.
+		 * Aborting local requests may cause serious problems,
+		 * if requests are completed to upper layers already,
+		 * and then later the already submitted local bio completes.
+		 * This can cause DMA into former bio pages that meanwhile
+		 * have been re-used for other things.
+		 * So aborting local requests may cause crashes,
+		 * or even worse, silent data corruption.
+		 */
+		if (test_and_clear_bit(FORCE_DETACH, &mdev->flags))
+			tl_abort_disk_io(mdev);
 		/* current state still has to be D_FAILED,
 		 * there is only one way out: to D_DISKLESS,
@@ -1653,9 +1675,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			drbd_md_sync(mdev);
 		}
 		put_ldev(mdev);
-
-		if (was_io_error && eh == EP_CALL_HELPER)
-			drbd_khelper(mdev, "local-io-error");
 	}
 	/* second half of local IO error, failure to attach,
@@ -1669,10 +1688,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 				"ASSERT FAILED: disk is %s while going diskless\n",
 				drbd_disk_str(mdev->state.disk));
-		mdev->rs_total = 0;
-		mdev->rs_failed = 0;
-		atomic_set(&mdev->rs_pending_cnt, 0);
-
 		if (ns.conn >= C_CONNECTED)
 			drbd_send_state(mdev, ns);
@@ -2194,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 {
 	struct p_sizes p;
 	sector_t d_size, u_size;
-	int q_order_type, max_bio_size;
+	int q_order_type;
+	unsigned int max_bio_size;
 	int ok;
 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2203,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 		u_size = mdev->ldev->dc.disk_size;
 		q_order_type = drbd_queue_order_type(mdev);
 		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
-		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
+		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
 		put_ldev(mdev);
 	} else {
 		d_size = 0;
@@ -2214,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 	/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
 	if (mdev->agreed_pro_version <= 94)
-		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 	p.d_size = cpu_to_be64(d_size);
 	p.u_size = cpu_to_be64(u_size);
@@ -3541,6 +3557,22 @@ static int drbd_congested(void *congested_data, int bdi_bits)
 		goto out;
 	}
+	if (test_bit(CALLBACK_PENDING, &mdev->flags)) {
+		r |= (1 << BDI_async_congested);
+		/* Without good local data, we would need to read from remote,
+		 * and that would need the worker thread as well, which is
+		 * currently blocked waiting for that usermode helper to
+		 * finish.
+		 */
+		if (!get_ldev_if_state(mdev, D_UP_TO_DATE))
+			r |= (1 << BDI_sync_congested);
+		else
+			put_ldev(mdev);
+		r &= bdi_bits;
+		reason = 'c';
+		goto out;
+	}
+
 	if (get_ldev(mdev)) {
 		q = bdev_get_queue(mdev->ldev->backing_bdev);
 		r = bdi_congested(&q->backing_dev_info, bdi_bits);
@@ -3604,6 +3636,7 @@ struct drbd_conf *drbd_new_device(unsigned int minor)
 	q->backing_dev_info.congested_data = mdev;
 	blk_queue_make_request(q, drbd_make_request);
+	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
 	   This triggers a max_bio_size message upon first attach or connect */
 	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
@@ -3870,7 +3903,7 @@ void drbd_md_sync(struct drbd_conf *mdev)
 	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
 		/* this was a try anyways ... */
 		dev_err(DEV, "meta data update failed!\n");
-		drbd_chk_io_error(mdev, 1, true);
+		drbd_chk_io_error(mdev, 1, DRBD_META_IO_ERROR);
 	}
 	/* Update mdev->ldev->md.la_size_sect,
@@ -3950,9 +3983,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.conn < C_CONNECTED) {
-		int peer;
+		unsigned int peer;
 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
-		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
+		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
 		mdev->peer_max_bio_size = peer;
 	}
 	spin_unlock_irq(&mdev->req_lock);
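Note: several drbd_main.c hunks above switch the max_bio_size arithmetic from int with min_t(int, ...) to unsigned int with plain min(). The bug class being closed: squeezing a large unsigned value through a signed cast flips it negative and defeats the clamp. A self-contained demonstration; the kernel macros are re-implemented here in simplified form, without the kernel's type checking:

#include <stdio.h>

/* Simplified stand-ins for the kernel's min()/min_t() macros. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))
#define min(a, b)         ((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int peer = 0x80000200u; /* bogus huge value from the wire */
	unsigned int cap  = 1u << 15;    /* 32KiB cap, as for old protocols */

	/* Casting to int makes 'peer' negative, so the "minimum" is wrong. */
	int bad = min_t(int, peer, cap);
	unsigned int good = min(peer, cap);

	printf("min_t(int,...) = %d\n", bad);  /* negative: cap is bypassed */
	printf("min(...)       = %u\n", good); /* 32768 as intended */
	return 0;
}

Keeping everything unsigned means an out-of-range peer value is still clamped instead of sailing past the comparison as a negative int.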
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index 6d4de6a72e80..fb9dce8daa24 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -147,6 +147,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 	char *argv[] = {usermode_helper, cmd, mb, NULL };
 	int ret;
+	if (current == mdev->worker.task)
+		set_bit(CALLBACK_PENDING, &mdev->flags);
+
 	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
 	if (get_net_conf(mdev)) {
@@ -189,6 +192,9 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 			usermode_helper, cmd, mb,
 			(ret >> 8) & 0xff, ret);
+	if (current == mdev->worker.task)
+		clear_bit(CALLBACK_PENDING, &mdev->flags);
+
 	if (ret < 0) /* Ignore any ERRNOs we got. */
 		ret = 0;
@@ -795,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
 	struct request_queue * const q = mdev->rq_queue;
-	int max_hw_sectors = max_bio_size >> 9;
-	int max_segments = 0;
+	unsigned int max_hw_sectors = max_bio_size >> 9;
+	unsigned int max_segments = 0;
 	if (get_ldev_if_state(mdev, D_ATTACHING)) {
 		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -829,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-	int now, new, local, peer;
+	unsigned int now, new, local, peer;
 	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
 	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -840,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 		mdev->local_max_bio_size = local;
 		put_ldev(mdev);
 	}
+	local = min(local, DRBD_MAX_BIO_SIZE);
 	/* We may ignore peer limits if the peer is modern enough.
 	   Because new from 8.3.8 onwards the peer can use multiple
 	   BIOs for a single peer_request */
 	if (mdev->state.conn >= C_CONNECTED) {
 		if (mdev->agreed_pro_version < 94) {
-			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+			peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
 		} else if (mdev->agreed_pro_version == 94)
 			peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -854,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 			peer = DRBD_MAX_BIO_SIZE;
 	}
-	new = min_t(int, local, peer);
+	new = min(local, peer);
 	if (mdev->state.role == R_PRIMARY && new < now)
-		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 	if (new != now)
 		dev_info(DEV, "max BIO size = %u\n", new);
@@ -950,6 +957,14 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 	 * to realize a "hot spare" feature (not that I'd recommend that) */
 	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+	/* make sure there is no leftover from previous force-detach attempts */
+	clear_bit(FORCE_DETACH, &mdev->flags);
+
+	/* and no leftover from previously aborted resync or verify, either */
+	mdev->rs_total = 0;
+	mdev->rs_failed = 0;
+	atomic_set(&mdev->rs_pending_cnt, 0);
+
 	/* allocation not in the IO path, cqueue thread context */
 	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
 	if (!nbc) {
@@ -1345,6 +1360,7 @@ static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 	if (dt.detach_force) {
+		set_bit(FORCE_DETACH, &mdev->flags);
 		drbd_force_state(mdev, NS(disk, D_FAILED));
 		reply->ret_code = SS_SUCCESS;
 		goto out;
@@ -1962,9 +1978,11 @@ static int drbd_nl_invalidate(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nl
 	int retcode;
 	/* If there is still bitmap IO pending, probably because of a previous
-	 * resync just being finished, wait for it before requesting a new resync. */
+	 * resync just being finished, wait for it before requesting a new resync.
+	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	drbd_flush_workqueue(mdev);
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
@@ -2003,9 +2021,11 @@ static int drbd_nl_invalidate_peer(struct drbd_conf *mdev, struct drbd_nl_cfg_re
 	int retcode;
 	/* If there is still bitmap IO pending, probably because of a previous
-	 * resync just being finished, wait for it before requesting a new resync. */
+	 * resync just being finished, wait for it before requesting a new resync.
+	 * Also wait for it's after_state_ch(). */
 	drbd_suspend_io(mdev);
 	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+	drbd_flush_workqueue(mdev);
 	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
diff --git a/drivers/block/drbd/drbd_proc.c b/drivers/block/drbd/drbd_proc.c
index 869bada2ed06..5496104f90b9 100644
--- a/drivers/block/drbd/drbd_proc.c
+++ b/drivers/block/drbd/drbd_proc.c
@@ -245,6 +245,9 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 		   mdev->state.role == R_SECONDARY) {
 		seq_printf(seq, "%2d: cs:Unconfigured\n", i);
 	} else {
+		/* reset mdev->congestion_reason */
+		bdi_rw_congested(&mdev->rq_queue->backing_dev_info);
+
 		seq_printf(seq,
 			"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
 			"    ns:%u nr:%u dw:%u dr:%u al:%u bm:%u "
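Note: the drbd_khelper() hunks above set CALLBACK_PENDING around the user-mode helper invocation when called from the worker thread; drbd_congested() (earlier, in drbd_main.c) then reports the device congested so memory writeback does not recurse into the blocked worker. A toy single-file model of that handshake using C11 atomics; the function names are illustrative only, not the kernel API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool callback_pending; /* models the CALLBACK_PENDING bit */

/* models drbd_congested(): claim congestion while the helper runs */
static bool device_congested(void)
{
	return atomic_load(&callback_pending);
}

/* models drbd_khelper(): mark the device congested for the duration */
static void run_usermode_helper(const char *cmd)
{
	atomic_store(&callback_pending, true);
	printf("helper '%s' running, congested=%d\n", cmd, device_congested());
	/* ... the blocking call_usermodehelper() would sit here ... */
	atomic_store(&callback_pending, false);
}

int main(void)
{
	printf("congested before: %d\n", device_congested());
	run_usermode_helper("local-io-error");
	printf("congested after:  %d\n", device_congested());
	return 0;
}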
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index ea4836e0ae98..c74ca2df7431 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -277,6 +277,9 @@ static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
 	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;
 	int i;
+	if (page == NULL)
+		return;
+
 	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
 		i = page_chain_free(page);
 	else {
@@ -316,7 +319,7 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 	gfp_t gfp_mask) __must_hold(local)
 {
 	struct drbd_epoch_entry *e;
-	struct page *page;
+	struct page *page = NULL;
 	unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
 	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))
@@ -329,9 +332,11 @@ struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
 		return NULL;
 	}
-	page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
-	if (!page)
-		goto fail;
+	if (data_size) {
+		page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));
+		if (!page)
+			goto fail;
+	}
 	INIT_HLIST_NODE(&e->collision);
 	e->epoch = NULL;
@@ -1270,7 +1275,6 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 	data_size -= dgs;
-	ERR_IF(data_size == 0) return NULL;
 	ERR_IF(data_size & 0x1ff) return NULL;
 	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;
@@ -1291,6 +1295,9 @@ read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __
 	if (!e)
 		return NULL;
+	if (!data_size)
+		return e;
+
 	ds = data_size;
 	page = e->pages;
 	page_chain_for_each(page) {
@@ -1715,6 +1722,10 @@ static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned
 	dp_flags = be32_to_cpu(p->dp_flags);
 	rw |= wire_flags_to_bio(mdev, dp_flags);
+	if (e->pages == NULL) {
+		D_ASSERT(e->size == 0);
+		D_ASSERT(dp_flags & DP_FLUSH);
+	}
 	if (dp_flags & DP_MAY_SET_IN_SYNC)
 		e->flags |= EE_MAY_SET_IN_SYNC;
@@ -3801,11 +3812,18 @@ void drbd_free_tl_hash(struct drbd_conf *mdev)
 	mdev->ee_hash = NULL;
 	mdev->ee_hash_s = 0;
-	/* paranoia code */
-	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++)
-		if (h->first)
-			dev_err(DEV, "ASSERT FAILED tl_hash[%u] == %p, expected NULL\n",
-				(int)(h - mdev->tl_hash), h->first);
+	/* We may not have had the chance to wait for all locally pending
+	 * application requests. The hlist_add_fake() prevents access after
+	 * free on master bio completion. */
+	for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
+		struct drbd_request *req;
+		struct hlist_node *pos, *n;
+		hlist_for_each_entry_safe(req, pos, n, h, collision) {
+			hlist_del_init(&req->collision);
+			hlist_add_fake(&req->collision);
+		}
+	}
+
 	kfree(mdev->tl_hash);
 	mdev->tl_hash = NULL;
 	mdev->tl_hash_s = 0;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index 8e93a6ac9bb6..910335c30927 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -455,7 +455,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 		req->rq_state |= RQ_LOCAL_COMPLETED;
 		req->rq_state &= ~RQ_LOCAL_PENDING;
-		__drbd_chk_io_error(mdev, false);
+		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 		_req_may_be_done_not_susp(req, m);
 		break;
@@ -477,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
 			break;
 		}
-		__drbd_chk_io_error(mdev, false);
+		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 	goto_queue_for_net_read:
@@ -1111,13 +1111,12 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(bio->bi_size > 0);
 	D_ASSERT((bio->bi_size & 0x1ff) == 0);
 	/* to make some things easier, force alignment of requests within the
 	 * granularity of our hash tables */
 	s_enr = bio->bi_sector >> HT_SHIFT;
-	e_enr = (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT;
+	e_enr = bio->bi_size ? (bio->bi_sector+(bio->bi_size>>9)-1) >> HT_SHIFT : s_enr;
 	if (likely(s_enr == e_enr)) {
 		do {
@@ -1275,7 +1274,7 @@ void request_timer_fn(unsigned long data)
 		 time_after(now, req->start_time + dt) &&
 		!time_in_range(now, mdev->last_reattach_jif, mdev->last_reattach_jif + dt)) {
 		dev_warn(DEV, "Local backing device failed to meet the disk-timeout\n");
-		__drbd_chk_io_error(mdev, 1);
+		__drbd_chk_io_error(mdev, DRBD_FORCE_DETACH);
 	}
 	nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
 	spin_unlock_irq(&mdev->req_lock);
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 620c70ff2231..6bce2cc179d4 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -111,7 +111,7 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
 	if (list_empty(&mdev->read_ee))
 		wake_up(&mdev->ee_wait);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, false);
+		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	drbd_queue_work(&mdev->data.work, &e->w);
@@ -154,7 +154,7 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(lo
 		: list_empty(&mdev->active_ee);
 	if (test_bit(__EE_WAS_ERROR, &e->flags))
-		__drbd_chk_io_error(mdev, false);
+		__drbd_chk_io_error(mdev, DRBD_IO_ERROR);
 	spin_unlock_irqrestore(&mdev->req_lock, flags);
 	if (is_syncer_req)
@@ -1501,14 +1501,6 @@ void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
 		return;
 	}
-	if (mdev->state.conn < C_AHEAD) {
-		/* In case a previous resync run was aborted by an IO error/detach on the peer. */
-		drbd_rs_cancel_all(mdev);
-		/* This should be done when we abort the resync. We definitely do not
-		   want to have this for connections going back and forth between
-		   Ahead/Behind and SyncSource/SyncTarget */
-	}
-
 	if (side == C_SYNC_TARGET) {
 		/* Since application IO was locked out during C_WF_BITMAP_T and
 		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
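Note: the receiver and request-path hunks above teach DRBD to accept zero-sized requests (an empty REQ_FLUSH carries no payload): allocation skips the page chain when data_size is 0, and the free path tolerates a NULL page pointer. A small userspace sketch of the same defensive pattern; all types and helpers here are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct request {
	size_t size;
	unsigned char *pages; /* NULL for a zero-sized flush request */
};

static struct request *alloc_request(size_t size)
{
	struct request *r = calloc(1, sizeof(*r));
	if (!r)
		return NULL;
	r->size = size;
	if (size) {                /* only back non-empty requests with pages */
		r->pages = malloc(size);
		if (!r->pages) {
			free(r);
			return NULL;
		}
	}
	return r;
}

static void free_pages(unsigned char *pages)
{
	if (pages == NULL)         /* tolerate the empty-flush case */
		return;
	free(pages);
}

int main(void)
{
	struct request *flush = alloc_request(0);    /* empty-flush analogue */
	struct request *write = alloc_request(4096);
	printf("flush pages=%p, write pages=%p\n",
	       (void *)flush->pages, (void *)write->pages);
	free_pages(flush->pages); free(flush);
	free_pages(write->pages); free(write);
	return 0;
}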
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 553f43a90953..a7d6347aaa79 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -191,6 +191,7 @@ static int print_unex = 1;
 #include <linux/mutex.h>
 #include <linux/io.h>
 #include <linux/uaccess.h>
+#include <linux/async.h>
 /*
  * PS/2 floppies have much slower step rates than regular floppies.
@@ -2516,8 +2517,7 @@ static int make_raw_rw_request(void)
 	set_fdc((long)current_req->rq_disk->private_data);
 	raw_cmd = &default_raw_cmd;
-	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_DISK |
-	FD_RAW_NEED_SEEK;
+	raw_cmd->flags = FD_RAW_SPIN | FD_RAW_NEED_DISK | FD_RAW_NEED_SEEK;
 	raw_cmd->cmd_count = NR_RW;
 	if (rq_data_dir(current_req) == READ) {
 		raw_cmd->flags |= FD_RAW_READ;
@@ -4123,7 +4123,7 @@ static struct kobject *floppy_find(dev_t dev, int *part, void *data)
 	return get_disk(disks[drive]);
 }
-static int __init floppy_init(void)
+static int __init do_floppy_init(void)
 {
 	int i, unit, drive;
 	int err, dr;
@@ -4338,6 +4338,24 @@ out_put_disk:
 	return err;
 }
+#ifndef MODULE
+static __init void floppy_async_init(void *data, async_cookie_t cookie)
+{
+	do_floppy_init();
+}
+#endif
+
+static int __init floppy_init(void)
+{
+#ifdef MODULE
+	return do_floppy_init();
+#else
+	/* Don't hold up the bootup by the floppy initialization */
+	async_schedule(floppy_async_init, NULL);
+	return 0;
+#endif
+}
+
 static const struct io_region {
 	int offset;
 	int size;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 76bc96fd01c8..d07c9f7fded6 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -485,7 +485,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req)
 		nbd_end_request(req);
 	} else {
 		spin_lock(&nbd->queue_lock);
-		list_add(&req->queuelist, &nbd->queue_head);
+		list_add_tail(&req->queuelist, &nbd->queue_head);
 		spin_unlock(&nbd->queue_lock);
 	}
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index 9a72277a31df..eb0d8216f557 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -513,42 +513,19 @@ static void process_page(unsigned long data)
 	}
 }
-struct mm_plug_cb {
-	struct blk_plug_cb cb;
-	struct cardinfo *card;
-};
-
-static void mm_unplug(struct blk_plug_cb *cb)
+static void mm_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
-	struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb);
+	struct cardinfo *card = cb->data;
-	spin_lock_irq(&mmcb->card->lock);
-	activate(mmcb->card);
-	spin_unlock_irq(&mmcb->card->lock);
-	kfree(mmcb);
+	spin_lock_irq(&card->lock);
+	activate(card);
+	spin_unlock_irq(&card->lock);
+	kfree(cb);
 }
 static int mm_check_plugged(struct cardinfo *card)
 {
-	struct blk_plug *plug = current->plug;
-	struct mm_plug_cb *mmcb;
-
-	if (!plug)
-		return 0;
-
-	list_for_each_entry(mmcb, &plug->cb_list, cb.list) {
-		if (mmcb->cb.callback == mm_unplug && mmcb->card == card)
-			return 1;
-	}
-	/* Not currently on the callback list */
-	mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC);
-	if (!mmcb)
-		return 0;
-
-	mmcb->card = card;
-	mmcb->cb.callback = mm_unplug;
-	list_add(&mmcb->cb.list, &plug->cb_list);
-	return 1;
+	return !!blk_check_plugged(mm_unplug, card, sizeof(struct blk_plug_cb));
 }
 static void mm_make_request(struct request_queue *q, struct bio *bio)
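Note: the one-line nbd.c change above replaces list_add() with list_add_tail() so queued requests are handled in arrival (FIFO) order rather than reversed. A standalone demonstration with a minimal doubly linked list modeled on the kernel's list.h, re-implemented so the example builds on its own:

#include <stdio.h>

struct node { int id; struct node *next, *prev; };

static void insert_between(struct node *n, struct node *prev, struct node *next)
{
	next->prev = n; n->next = next; n->prev = prev; prev->next = n;
}

/* list_add() analogue: push right after the head (LIFO for a consumer) */
static void list_add(struct node *n, struct node *head)
{
	insert_between(n, head, head->next);
}

/* list_add_tail() analogue: append before the head (FIFO for a consumer) */
static void list_add_tail(struct node *n, struct node *head)
{
	insert_between(n, head->prev, head);
}

static void dump(const char *tag, struct node *head)
{
	printf("%s:", tag);
	for (struct node *p = head->next; p != head; p = p->next)
		printf(" %d", p->id);
	printf("\n");
}

int main(void)
{
	struct node lifo = { 0, &lifo, &lifo }, fifo = { 0, &fifo, &fifo };
	struct node a[3] = { {1}, {2}, {3} }, b[3] = { {1}, {2}, {3} };

	for (int i = 0; i < 3; i++) {
		list_add(&a[i], &lifo);      /* old nbd behaviour */
		list_add_tail(&b[i], &fifo); /* fixed behaviour */
	}
	dump("list_add (reversed)", &lifo);  /* 3 2 1 */
	dump("list_add_tail (FIFO)", &fifo); /* 1 2 3 */
	return 0;
}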
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index d45cf1bcbde5..d06ea2950dd9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -53,6 +53,7 @@ config AMBA_PL08X
 	bool "ARM PrimeCell PL080 or PL081 support"
 	depends on ARM_AMBA && EXPERIMENTAL
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Platform has a PL08x DMAC device
 	  which can provide DMA engine support
@@ -269,6 +270,7 @@ config DMA_SA11X0
 	tristate "SA-11x0 DMA support"
 	depends on ARCH_SA1100
 	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
 	help
 	  Support the DMA engine found on Intel StrongARM SA-1100 and
 	  SA-1110 SoCs.  This DMA engine can only be used with on-chip
@@ -284,9 +286,18 @@ config MMP_TDMA
 	  Say Y here if you enabled MMP ADMA, otherwise say N.
+config DMA_OMAP
+	tristate "OMAP DMA support"
+	depends on ARCH_OMAP
+	select DMA_ENGINE
+	select DMA_VIRTUAL_CHANNELS
+
 config DMA_ENGINE
 	bool
+config DMA_VIRTUAL_CHANNELS
+	tristate
+
 comment "DMA Clients"
 	depends on DMA_ENGINE
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 640356add0a3..4cf6b128ab9a 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -2,6 +2,7 @@ ccflags-$(CONFIG_DMADEVICES_DEBUG) := -DDEBUG
 ccflags-$(CONFIG_DMADEVICES_VDEBUG) += -DVERBOSE_DEBUG
 obj-$(CONFIG_DMA_ENGINE) += dmaengine.o
+obj-$(CONFIG_DMA_VIRTUAL_CHANNELS) += virt-dma.o
 obj-$(CONFIG_NET_DMA) += iovlock.o
 obj-$(CONFIG_INTEL_MID_DMAC) += intel_mid_dma.o
 obj-$(CONFIG_DMATEST) += dmatest.o
@@ -30,3 +31,4 @@ obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
 obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
 obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
 obj-$(CONFIG_MMP_TDMA) += mmp_tdma.o
+obj-$(CONFIG_DMA_OMAP) += omap-dma.o
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 49ecbbb8932d..6fbeebb9486f 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -86,10 +86,12 @@
 #include <asm/hardware/pl080.h>
 #include "dmaengine.h"
+#include "virt-dma.h"
 #define DRIVER_NAME	"pl08xdmac"
 static struct amba_driver pl08x_amba_driver;
+struct pl08x_driver_data;
 /**
  * struct vendor_data - vendor-specific config parameters for PL08x derivatives
@@ -119,6 +121,123 @@ struct pl08x_lli {
 };
 /**
+ * struct pl08x_bus_data - information of source or destination
+ * busses for a transfer
+ * @addr: current address
+ * @maxwidth: the maximum width of a transfer on this bus
+ * @buswidth: the width of this bus in bytes: 1, 2 or 4
+ */
+struct pl08x_bus_data {
+	dma_addr_t addr;
+	u8 maxwidth;
+	u8 buswidth;
+};
+
+/**
+ * struct pl08x_phy_chan - holder for the physical channels
+ * @id: physical index to this channel
+ * @lock: a lock to use when altering an instance of this struct
+ * @serving: the virtual channel currently being served by this physical
+ * channel
+ * @locked: channel unavailable for the system, e.g. dedicated to secure
+ * world
+ */
+struct pl08x_phy_chan {
+	unsigned int id;
+	void __iomem *base;
+	spinlock_t lock;
+	struct pl08x_dma_chan *serving;
+	bool locked;
+};
+
+/**
+ * struct pl08x_sg - structure containing data per sg
+ * @src_addr: src address of sg
+ * @dst_addr: dst address of sg
+ * @len: transfer len in bytes
+ * @node: node for txd's dsg_list
+ */
+struct pl08x_sg {
+	dma_addr_t src_addr;
+	dma_addr_t dst_addr;
+	size_t len;
+	struct list_head node;
+};
+
+/**
+ * struct pl08x_txd - wrapper for struct dma_async_tx_descriptor
+ * @vd: virtual DMA descriptor
+ * @dsg_list: list of children sg's
+ * @llis_bus: DMA memory address (physical) start for the LLIs
+ * @llis_va: virtual memory address start for the LLIs
+ * @cctl: control reg values for current txd
+ * @ccfg: config reg values for current txd
+ * @done: this marks completed descriptors, which should not have their
+ * mux released.
+ */
+struct pl08x_txd {
+	struct virt_dma_desc vd;
+	struct list_head dsg_list;
+	dma_addr_t llis_bus;
+	struct pl08x_lli *llis_va;
+	/* Default cctl value for LLIs */
+	u32 cctl;
+	/*
+	 * Settings to be put into the physical channel when we
+	 * trigger this txd.  Other registers are in llis_va[0].
+	 */
+	u32 ccfg;
+	bool done;
+};
+
+/**
+ * struct pl08x_dma_chan_state - holds the PL08x specific virtual channel
+ * states
+ * @PL08X_CHAN_IDLE: the channel is idle
+ * @PL08X_CHAN_RUNNING: the channel has allocated a physical transport
+ * channel and is running a transfer on it
+ * @PL08X_CHAN_PAUSED: the channel has allocated a physical transport
+ * channel, but the transfer is currently paused
+ * @PL08X_CHAN_WAITING: the channel is waiting for a physical transport
+ * channel to become available (only pertains to memcpy channels)
+ */
+enum pl08x_dma_chan_state {
+	PL08X_CHAN_IDLE,
+	PL08X_CHAN_RUNNING,
+	PL08X_CHAN_PAUSED,
+	PL08X_CHAN_WAITING,
+};
+
+/**
+ * struct pl08x_dma_chan - this structure wraps a DMA ENGINE channel
+ * @vc: wrappped virtual channel
+ * @phychan: the physical channel utilized by this channel, if there is one
+ * @name: name of channel
+ * @cd: channel platform data
+ * @runtime_addr: address for RX/TX according to the runtime config
+ * @at: active transaction on this channel
+ * @lock: a lock for this channel data
+ * @host: a pointer to the host (internal use)
+ * @state: whether the channel is idle, paused, running etc
+ * @slave: whether this channel is a device (slave) or for memcpy
+ * @signal: the physical DMA request signal which this channel is using
+ * @mux_use: count of descriptors using this DMA request signal setting
+ */
+struct pl08x_dma_chan {
+	struct virt_dma_chan vc;
+	struct pl08x_phy_chan *phychan;
+	const char *name;
+	const struct pl08x_channel_data *cd;
+	struct dma_slave_config cfg;
+	struct pl08x_txd *at;
+	struct pl08x_driver_data *host;
+	enum pl08x_dma_chan_state state;
+	bool slave;
+	int signal;
+	unsigned mux_use;
+};
+
+/**
  * struct pl08x_driver_data - the local state holder for the PL08x
  * @slave: slave engine for this instance
  * @memcpy: memcpy engine for this instance
@@ -128,7 +247,6 @@ struct pl08x_lli {
  * @pd: platform data passed in from the platform/machine
  * @phy_chans: array of data for the physical channels
  * @pool: a pool for the LLI descriptors
- * @pool_ctr: counter of LLIs in the pool
 * @lli_buses: bitmask to or in to LLI pointer selecting AHB port for LLI
 * fetches
 * @mem_buses: set to indicate memory transfers on AHB2.
@@ -143,10 +261,8 @@ struct pl08x_driver_data {
 	struct pl08x_platform_data *pd;
 	struct pl08x_phy_chan *phy_chans;
 	struct dma_pool *pool;
-	int pool_ctr;
 	u8 lli_buses;
 	u8 mem_buses;
-	spinlock_t lock;
 };
 /*
@@ -162,12 +278,51 @@ struct pl08x_driver_data {
 static inline struct pl08x_dma_chan *to_pl08x_chan(struct dma_chan *chan)
 {
-	return container_of(chan, struct pl08x_dma_chan, chan);
+	return container_of(chan, struct pl08x_dma_chan, vc.chan);
 }
 static inline struct pl08x_txd *to_pl08x_txd(struct dma_async_tx_descriptor *tx)
 {
-	return container_of(tx, struct pl08x_txd, tx);
+	return container_of(tx, struct pl08x_txd, vd.tx);
+}
+
+/*
+ * Mux handling.
+ *
+ * This gives us the DMA request input to the PL08x primecell which the
+ * peripheral described by the channel data will be routed to, possibly
+ * via a board/SoC specific external MUX.  One important point to note
+ * here is that this does not depend on the physical channel.
+ */
+static int pl08x_request_mux(struct pl08x_dma_chan *plchan)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+	int ret;
+
+	if (plchan->mux_use++ == 0 && pd->get_signal) {
+		ret = pd->get_signal(plchan->cd);
+		if (ret < 0) {
+			plchan->mux_use = 0;
+			return ret;
+		}
+
+		plchan->signal = ret;
+	}
+	return 0;
+}
+
+static void pl08x_release_mux(struct pl08x_dma_chan *plchan)
+{
+	const struct pl08x_platform_data *pd = plchan->host->pd;
+
+	if (plchan->signal >= 0) {
+		WARN_ON(plchan->mux_use == 0);
+
+		if (--plchan->mux_use == 0 && pd->put_signal) {
+			pd->put_signal(plchan->cd, plchan->signal);
+			plchan->signal = -1;
+		}
+	}
 }
 /*
@@ -189,20 +344,25 @@ static int pl08x_phy_channel_busy(struct pl08x_phy_chan *ch)
 * been set when the LLIs were constructed.  Poke them into the hardware
 * and start the transfer.
 */
-static void pl08x_start_txd(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
+static void pl08x_start_next_txd(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_phy_chan *phychan = plchan->phychan;
-	struct pl08x_lli *lli = &txd->llis_va[0];
+	struct virt_dma_desc *vd = vchan_next_desc(&plchan->vc);
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_lli *lli;
 	u32 val;
+	list_del(&txd->vd.node);
+
 	plchan->at = txd;
 	/* Wait for channel inactive */
 	while (pl08x_phy_channel_busy(phychan))
 		cpu_relax();
+	lli = &txd->llis_va[0];
+
 	dev_vdbg(&pl08x->adev->dev,
 		"WRITE channel %d: csrc=0x%08x, cdst=0x%08x, "
 		"clli=0x%08x, cctl=0x%08x, ccfg=0x%08x\n",
@@ -311,10 +471,8 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_phy_chan *ch;
 	struct pl08x_txd *txd;
-	unsigned long flags;
 	size_t bytes = 0;
-	spin_lock_irqsave(&plchan->lock, flags);
 	ch = plchan->phychan;
 	txd = plchan->at;
@@ -354,18 +512,6 @@ static u32 pl08x_getbytes_chan(struct pl08x_dma_chan *plchan)
 		}
 	}
-	/* Sum up all queued transactions */
-	if (!list_empty(&plchan->pend_list)) {
-		struct pl08x_txd *txdi;
-		list_for_each_entry(txdi, &plchan->pend_list, node) {
-			struct pl08x_sg *dsg;
-			list_for_each_entry(dsg, &txd->dsg_list, node)
-				bytes += dsg->len;
-		}
-	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
 	return bytes;
 }
@@ -391,7 +537,6 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		if (!ch->locked && !ch->serving) {
 			ch->serving = virt_chan;
-			ch->signal = -1;
 			spin_unlock_irqrestore(&ch->lock, flags);
 			break;
 		}
@@ -404,25 +549,114 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
 		return NULL;
 	}
-	pm_runtime_get_sync(&pl08x->adev->dev);
 	return ch;
 }
+/* Mark the physical channel as free.  Note, this write is atomic. */
 static inline void pl08x_put_phy_channel(struct pl08x_driver_data *pl08x,
 	struct pl08x_phy_chan *ch)
 {
-	unsigned long flags;
+	ch->serving = NULL;
+}
+
+/*
+ * Try to allocate a physical channel.  When successful, assign it to
+ * this virtual channel, and initiate the next descriptor.  The
+ * virtual channel lock must be held at this point.
+ */
+static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_phy_chan *ch;
-	spin_lock_irqsave(&ch->lock, flags);
+	ch = pl08x_get_phy_channel(pl08x, plchan);
+	if (!ch) {
+		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
+		plchan->state = PL08X_CHAN_WAITING;
+		return;
+	}
-	/* Stop the channel and clear its interrupts */
-	pl08x_terminate_phy_chan(pl08x, ch);
+	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
-	pm_runtime_put(&pl08x->adev->dev);
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
-	/* Mark it as free */
-	ch->serving = NULL;
-	spin_unlock_irqrestore(&ch->lock, flags);
+static void pl08x_phy_reassign_start(struct pl08x_phy_chan *ch,
+	struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+
+	dev_dbg(&pl08x->adev->dev, "reassigned physical channel %d for xfer on %s\n",
+		ch->id, plchan->name);
+
+	/*
+	 * We do this without taking the lock; we're really only concerned
+	 * about whether this pointer is NULL or not, and we're guaranteed
+	 * that this will only be called when it _already_ is non-NULL.
+	 */
+	ch->serving = plchan;
+	plchan->phychan = ch;
+	plchan->state = PL08X_CHAN_RUNNING;
+	pl08x_start_next_txd(plchan);
+}
+
+/*
+ * Free a physical DMA channel, potentially reallocating it to another
+ * virtual channel if we have any pending.
+ */
+static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
+{
+	struct pl08x_driver_data *pl08x = plchan->host;
+	struct pl08x_dma_chan *p, *next;
+
+ retry:
+	next = NULL;
+
+	/* Find a waiting virtual channel for the next transfer. */
+	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
+		if (p->state == PL08X_CHAN_WAITING) {
+			next = p;
+			break;
+		}
+
+	if (!next) {
+		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
+			if (p->state == PL08X_CHAN_WAITING) {
+				next = p;
+				break;
+			}
+	}
+
+	/* Ensure that the physical channel is stopped */
+	pl08x_terminate_phy_chan(pl08x, plchan->phychan);
+
+	if (next) {
+		bool success;
+
+		/*
+		 * Eww.  We know this isn't going to deadlock
+		 * but lockdep probably doesn't.
+		 */
+		spin_lock(&next->vc.lock);
+		/* Re-check the state now that we have the lock */
+		success = next->state == PL08X_CHAN_WAITING;
+		if (success)
+			pl08x_phy_reassign_start(plchan->phychan, next);
+		spin_unlock(&next->vc.lock);
+
+		/* If the state changed, try to find another channel */
+		if (!success)
+			goto retry;
+	} else {
+		/* No more jobs, so free up the physical channel */
+		pl08x_put_phy_channel(pl08x, plchan->phychan);
+	}
+
+	plchan->phychan = NULL;
+	plchan->state = PL08X_CHAN_IDLE;
 }
 /*
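Note: pl08x_request_mux()/pl08x_release_mux() above reference-count the DMA request signal: the platform's get_signal()/put_signal() hooks fire only on the 0-to-1 and 1-to-0 transitions of mux_use. A compact userspace model of that transition-counting idiom; the signal-allocator functions are stand-ins, not the pl08x platform API:

#include <stdio.h>

static unsigned mux_use;   /* descriptors currently using the signal */
static int signal_nr = -1; /* allocated request signal, -1 = none */

static int platform_get_signal(void)  { return 5; } /* pretend allocator */
static void platform_put_signal(int s) { (void)s; }

static int request_mux(void)
{
	if (mux_use++ == 0) {            /* first user allocates the signal */
		int ret = platform_get_signal();
		if (ret < 0) {
			mux_use = 0;
			return ret;
		}
		signal_nr = ret;
	}
	return 0;
}

static void release_mux(void)
{
	if (signal_nr >= 0 && --mux_use == 0) { /* last user frees it */
		platform_put_signal(signal_nr);
		signal_nr = -1;
	}
}

int main(void)
{
	request_mux();
	request_mux();                   /* two descriptors share one signal */
	printf("in use: signal=%d refs=%u\n", signal_nr, mux_use);
	release_mux();
	release_mux();
	printf("released: signal=%d refs=%u\n", signal_nr, mux_use);
	return 0;
}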
@@ -585,8 +819,6 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 		return 0;
 	}
-	pl08x->pool_ctr++;
-
 	bd.txd = txd;
 	bd.lli_bus = (pl08x->lli_buses & PL08X_AHB2) ? PL080_LLI_LM_AHB2 : 0;
 	cctl = txd->cctl;
@@ -802,18 +1034,14 @@ static int pl08x_fill_llis_for_desc(struct pl08x_driver_data *pl08x,
 	return num_llis;
 }
-/* You should call this with the struct pl08x lock held */
 static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	struct pl08x_txd *txd)
 {
 	struct pl08x_sg *dsg, *_dsg;
-	/* Free the LLI */
 	if (txd->llis_va)
 		dma_pool_free(pl08x->pool, txd->llis_va, txd->llis_bus);
-	pl08x->pool_ctr--;
-
 	list_for_each_entry_safe(dsg, _dsg, &txd->dsg_list, node) {
 		list_del(&dsg->node);
 		kfree(dsg);
@@ -822,133 +1050,75 @@ static void pl08x_free_txd(struct pl08x_driver_data *pl08x,
 	kfree(txd);
 }
-static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
-	struct pl08x_dma_chan *plchan)
+static void pl08x_unmap_buffers(struct pl08x_txd *txd)
 {
-	struct pl08x_txd *txdi = NULL;
-	struct pl08x_txd *next;
-
-	if (!list_empty(&plchan->pend_list)) {
-		list_for_each_entry_safe(txdi,
-			next, &plchan->pend_list, node) {
-			list_del(&txdi->node);
-			pl08x_free_txd(pl08x, txdi);
+	struct device *dev = txd->vd.tx.chan->device->dev;
+	struct pl08x_sg *dsg;
+
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
+		else {
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->src_addr, dsg->len,
+						DMA_TO_DEVICE);
 		}
 	}
+	if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
+		if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_single(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+		else
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				dma_unmap_page(dev, dsg->dst_addr, dsg->len,
+						DMA_FROM_DEVICE);
+	}
 }
-/*
- * The DMA ENGINE API
- */
-static int pl08x_alloc_chan_resources(struct dma_chan *chan)
+static void pl08x_desc_free(struct virt_dma_desc *vd)
 {
-	return 0;
-}
+	struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+	struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
-static void pl08x_free_chan_resources(struct dma_chan *chan)
-{
+	if (!plchan->slave)
+		pl08x_unmap_buffers(txd);
+
+	if (!txd->done)
+		pl08x_release_mux(plchan);
+
+	pl08x_free_txd(plchan->host, txd);
 }
-/*
- * This should be called with the channel plchan->lock held
- */
-static int prep_phy_channel(struct pl08x_dma_chan *plchan,
-	struct pl08x_txd *txd)
+static void pl08x_free_txd_list(struct pl08x_driver_data *pl08x,
+	struct pl08x_dma_chan *plchan)
 {
-	struct pl08x_driver_data *pl08x = plchan->host;
-	struct pl08x_phy_chan *ch;
-	int ret;
-
-	/* Check if we already have a channel */
-	if (plchan->phychan) {
-		ch = plchan->phychan;
-		goto got_channel;
-	}
+	LIST_HEAD(head);
+	struct pl08x_txd *txd;
-	ch = pl08x_get_phy_channel(pl08x, plchan);
-	if (!ch) {
-		/* No physical channel available, cope with it */
-		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
-		return -EBUSY;
-	}
+	vchan_get_all_descriptors(&plchan->vc, &head);
-	/*
-	 * OK we have a physical channel: for memcpy() this is all we
-	 * need, but for slaves the physical signals may be muxed!
-	 * Can the platform allow us to use this channel?
-	 */
-	if (plchan->slave && pl08x->pd->get_signal) {
-		ret = pl08x->pd->get_signal(plchan);
-		if (ret < 0) {
-			dev_dbg(&pl08x->adev->dev,
-				"unable to use physical channel %d for transfer on %s due to platform restrictions\n",
-				ch->id, plchan->name);
-			/* Release physical channel & return */
-			pl08x_put_phy_channel(pl08x, ch);
-			return -EBUSY;
-		}
-		ch->signal = ret;
+	while (!list_empty(&head)) {
+		txd = list_first_entry(&head, struct pl08x_txd, vd.node);
+		list_del(&txd->vd.node);
+		pl08x_desc_free(&txd->vd);
 	}
-
-	plchan->phychan = ch;
-	dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
-		 ch->id,
-		 ch->signal,
-		 plchan->name);
-
-got_channel:
-	/* Assign the flow control signal to this channel */
-	if (txd->direction == DMA_MEM_TO_DEV)
-		txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-	else if (txd->direction == DMA_DEV_TO_MEM)
-		txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
-
-	plchan->phychan_hold++;
-
-	return 0;
 }
-static void release_phy_channel(struct pl08x_dma_chan *plchan)
+/*
+ * The DMA ENGINE API
+ */
+static int pl08x_alloc_chan_resources(struct dma_chan *chan)
 {
-	struct pl08x_driver_data *pl08x = plchan->host;
-
-	if ((plchan->phychan->signal >= 0) && pl08x->pd->put_signal) {
-		pl08x->pd->put_signal(plchan);
-		plchan->phychan->signal = -1;
-	}
-	pl08x_put_phy_channel(pl08x, plchan->phychan);
-	plchan->phychan = NULL;
+	return 0;
 }
-static dma_cookie_t pl08x_tx_submit(struct dma_async_tx_descriptor *tx)
+static void pl08x_free_chan_resources(struct dma_chan *chan)
 {
-	struct pl08x_dma_chan *plchan = to_pl08x_chan(tx->chan);
-	struct pl08x_txd *txd = to_pl08x_txd(tx);
-	unsigned long flags;
-	dma_cookie_t cookie;
-
-	spin_lock_irqsave(&plchan->lock, flags);
-	cookie = dma_cookie_assign(tx);
-
-	/* Put this onto the pending list */
-	list_add_tail(&txd->node, &plchan->pend_list);
-
-	/*
-	 * If there was no physical channel available for this memcpy,
-	 * stack the request up and indicate that the channel is waiting
-	 * for a free physical channel.
-	 */
-	if (!plchan->slave && !plchan->phychan) {
-		/* Do this memcpy whenever there is a channel ready */
-		plchan->state = PL08X_CHAN_WAITING;
-		plchan->waiting = txd;
-	} else {
-		plchan->phychan_hold--;
-	}
-
-	spin_unlock_irqrestore(&plchan->lock, flags);
-
-	return cookie;
+	/* Ensure all queued descriptors are freed */
+	vchan_free_chan_resources(to_virt_chan(chan));
 }
 static struct dma_async_tx_descriptor *pl08x_prep_dma_interrupt(
@@ -968,23 +1138,53 @@ static enum dma_status pl08x_dma_tx_status(struct dma_chan *chan,
 	dma_cookie_t cookie, struct dma_tx_state *txstate)
 {
 	struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
+	struct virt_dma_desc *vd;
+	unsigned long flags;
 	enum dma_status ret;
+	size_t bytes = 0;
 	ret = dma_cookie_status(chan, cookie, txstate);
 	if (ret == DMA_SUCCESS)
 		return ret;
 	/*
+	 * There's no point calculating the residue if there's
+	 * no txstate to store the value.
+	 */
+	if (!txstate) {
+		if (plchan->state == PL08X_CHAN_PAUSED)
+			ret = DMA_PAUSED;
+		return ret;
+	}
+
+	spin_lock_irqsave(&plchan->vc.lock, flags);
+	ret = dma_cookie_status(chan, cookie, txstate);
+	if (ret != DMA_SUCCESS) {
+		vd = vchan_find_desc(&plchan->vc, cookie);
+		if (vd) {
+			/* On the issued list, so hasn't been processed yet */
+			struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
+			struct pl08x_sg *dsg;
+
+			list_for_each_entry(dsg, &txd->dsg_list, node)
+				bytes += dsg->len;
+		} else {
+			bytes = pl08x_getbytes_chan(plchan);
+		}
+	}
+	spin_unlock_irqrestore(&plchan->vc.lock, flags);
+
+	/*
 	 * This cookie not complete yet
 	 * Get number of bytes left in the active transactions and queue
 	 */
-	dma_set_residue(txstate, pl08x_getbytes_chan(plchan));
+	dma_set_residue(txstate, bytes);
-	if (plchan->state == PL08X_CHAN_PAUSED)
-		return DMA_PAUSED;
+	if (plchan->state == PL08X_CHAN_PAUSED && ret == DMA_IN_PROGRESS)
+		ret = DMA_PAUSED;
 	/* Whether waiting or running, we're in progress */
-	return DMA_IN_PROGRESS;
+	return ret;
 }
 /* PrimeCell DMA extension */
- */ - if (plchan->state == PL08X_CHAN_IDLE) - plchan->state = PL08X_CHAN_PAUSED; - - spin_unlock_irqrestore(&plchan->lock, flags); - - return 0; + spin_unlock_irqrestore(&plchan->vc.lock, flags); } -static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan, - unsigned long flags) +static struct pl08x_txd *pl08x_get_txd(struct pl08x_dma_chan *plchan) { struct pl08x_txd *txd = kzalloc(sizeof(*txd), GFP_NOWAIT); if (txd) { - dma_async_tx_descriptor_init(&txd->tx, &plchan->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = pl08x_tx_submit; - INIT_LIST_HEAD(&txd->node); INIT_LIST_HEAD(&txd->dsg_list); /* Always enable error and terminal interrupts */ @@ -1274,7 +1369,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( struct pl08x_sg *dsg; int ret; - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no memory for descriptor\n", __func__); @@ -1290,14 +1385,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( } list_add_tail(&dsg->node, &txd->dsg_list); - txd->direction = DMA_NONE; dsg->src_addr = src; dsg->dst_addr = dest; dsg->len = len; /* Set platform data for m2m */ txd->ccfg |= PL080_FLOW_MEM2MEM << PL080_CONFIG_FLOW_CONTROL_SHIFT; - txd->cctl = pl08x->pd->memcpy_channel.cctl & + txd->cctl = pl08x->pd->memcpy_channel.cctl_memcpy & ~(PL080_CONTROL_DST_AHB2 | PL080_CONTROL_SRC_AHB2); /* Both to be incremented or the code will break */ @@ -1307,11 +1401,13 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy( txd->cctl |= pl08x_select_bus(pl08x->mem_buses, pl08x->mem_buses); - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( @@ -1324,36 +1420,40 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( struct pl08x_txd *txd; struct pl08x_sg *dsg; struct scatterlist *sg; + enum dma_slave_buswidth addr_width; dma_addr_t slave_addr; int ret, tmp; + u8 src_buses, dst_buses; + u32 maxburst, cctl; dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", __func__, sg_dma_len(sgl), plchan->name); - txd = pl08x_get_txd(plchan, flags); + txd = pl08x_get_txd(plchan); if (!txd) { dev_err(&pl08x->adev->dev, "%s no txd\n", __func__); return NULL; } - if (direction != plchan->runtime_direction) - dev_err(&pl08x->adev->dev, "%s DMA setup does not match " - "the direction configured for the PrimeCell\n", - __func__); - /* * Set up addresses, the PrimeCell configured address * will take precedence since this may configure the * channel target address dynamically at runtime. 
*/ - txd->direction = direction; - if (direction == DMA_MEM_TO_DEV) { - txd->cctl = plchan->dst_cctl; - slave_addr = plchan->dst_addr; + cctl = PL080_CONTROL_SRC_INCR; + slave_addr = plchan->cfg.dst_addr; + addr_width = plchan->cfg.dst_addr_width; + maxburst = plchan->cfg.dst_maxburst; + src_buses = pl08x->mem_buses; + dst_buses = plchan->cd->periph_buses; } else if (direction == DMA_DEV_TO_MEM) { - txd->cctl = plchan->src_cctl; - slave_addr = plchan->src_addr; + cctl = PL080_CONTROL_DST_INCR; + slave_addr = plchan->cfg.src_addr; + addr_width = plchan->cfg.src_addr_width; + maxburst = plchan->cfg.src_maxburst; + src_buses = plchan->cd->periph_buses; + dst_buses = pl08x->mem_buses; } else { pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, @@ -1361,7 +1461,17 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( return NULL; } - if (plchan->device_fc) + cctl |= pl08x_get_cctl(plchan, addr_width, maxburst); + if (cctl == ~0) { + pl08x_free_txd(pl08x, txd); + dev_err(&pl08x->adev->dev, + "DMA slave configuration botched?\n"); + return NULL; + } + + txd->cctl = cctl | pl08x_select_bus(src_buses, dst_buses); + + if (plchan->cfg.device_fc) tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER : PL080_FLOW_PER2MEM_PER; else @@ -1370,9 +1480,28 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT; + ret = pl08x_request_mux(plchan); + if (ret < 0) { + pl08x_free_txd(pl08x, txd); + dev_dbg(&pl08x->adev->dev, + "unable to mux for transfer on %s due to platform restrictions\n", + plchan->name); + return NULL; + } + + dev_dbg(&pl08x->adev->dev, "allocated DMA request signal %d for xfer on %s\n", + plchan->signal, plchan->name); + + /* Assign the flow control signal to this channel */ + if (direction == DMA_MEM_TO_DEV) + txd->ccfg |= plchan->signal << PL080_CONFIG_DST_SEL_SHIFT; + else + txd->ccfg |= plchan->signal << PL080_CONFIG_SRC_SEL_SHIFT; + for_each_sg(sgl, sg, sg_len, tmp) { dsg = kzalloc(sizeof(struct pl08x_sg), GFP_NOWAIT); if (!dsg) { + pl08x_release_mux(plchan); pl08x_free_txd(pl08x, txd); dev_err(&pl08x->adev->dev, "%s no mem for pl080 sg\n", __func__); @@ -1390,11 +1519,14 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg( } } - ret = pl08x_prep_channel_resources(plchan, txd); - if (ret) + ret = pl08x_fill_llis_for_desc(plchan->host, txd); + if (!ret) { + pl08x_release_mux(plchan); + pl08x_free_txd(pl08x, txd); return NULL; + } - return &txd->tx; + return vchan_tx_prep(&plchan->vc, &txd->vd, flags); } static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, @@ -1415,9 +1547,9 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, * Anything succeeds on channels with no physical allocation and * no queued transfers. 
*/ - spin_lock_irqsave(&plchan->lock, flags); + spin_lock_irqsave(&plchan->vc.lock, flags); if (!plchan->phychan && !plchan->at) { - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return 0; } @@ -1426,18 +1558,15 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, plchan->state = PL08X_CHAN_IDLE; if (plchan->phychan) { - pl08x_terminate_phy_chan(pl08x, plchan->phychan); - /* * Mark physical channel as free and free any slave * signal */ - release_phy_channel(plchan); - plchan->phychan_hold = 0; + pl08x_phy_free(plchan); } /* Dequeue jobs and free LLIs */ if (plchan->at) { - pl08x_free_txd(pl08x, plchan->at); + pl08x_desc_free(&plchan->at->vd); plchan->at = NULL; } /* Dequeue jobs not yet fired as well */ @@ -1457,7 +1586,7 @@ static int pl08x_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, break; } - spin_unlock_irqrestore(&plchan->lock, flags); + spin_unlock_irqrestore(&plchan->vc.lock, flags); return ret; } @@ -1494,123 +1623,6 @@ static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); } -static void pl08x_unmap_buffers(struct pl08x_txd *txd) -{ - struct device *dev = txd->tx.chan->device->dev; - struct pl08x_sg *dsg; - - if (!(txd->tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - else { - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->src_addr, dsg->len, - DMA_TO_DEVICE); - } - } - if (!(txd->tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) { - if (txd->tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE) - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_single(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - else - list_for_each_entry(dsg, &txd->dsg_list, node) - dma_unmap_page(dev, dsg->dst_addr, dsg->len, - DMA_FROM_DEVICE); - } -} - -static void pl08x_tasklet(unsigned long data) -{ - struct pl08x_dma_chan *plchan = (struct pl08x_dma_chan *) data; - struct pl08x_driver_data *pl08x = plchan->host; - struct pl08x_txd *txd; - unsigned long flags; - - spin_lock_irqsave(&plchan->lock, flags); - - txd = plchan->at; - plchan->at = NULL; - - if (txd) { - /* Update last completed */ - dma_cookie_complete(&txd->tx); - } - - /* If a new descriptor is queued, set it up plchan->at is NULL here */ - if (!list_empty(&plchan->pend_list)) { - struct pl08x_txd *next; - - next = list_first_entry(&plchan->pend_list, - struct pl08x_txd, - node); - list_del(&next->node); - - pl08x_start_txd(plchan, next); - } else if (plchan->phychan_hold) { - /* - * This channel is still in use - we have a new txd being - * prepared and will soon be queued. Don't give up the - * physical channel. - */ - } else { - struct pl08x_dma_chan *waiting = NULL; - - /* - * No more jobs, so free up the physical channel - * Free any allocated signal on slave transfers too - */ - release_phy_channel(plchan); - plchan->state = PL08X_CHAN_IDLE; - - /* - * And NOW before anyone else can grab that free:d up - * physical channel, see if there is some memcpy pending - * that seriously needs to start because of being stacked - * up while we were choking the physical channels with data. 
- */ - list_for_each_entry(waiting, &pl08x->memcpy.channels, - chan.device_node) { - if (waiting->state == PL08X_CHAN_WAITING && - waiting->waiting != NULL) { - int ret; - - /* This should REALLY not fail now */ - ret = prep_phy_channel(waiting, - waiting->waiting); - BUG_ON(ret); - waiting->phychan_hold--; - waiting->state = PL08X_CHAN_RUNNING; - waiting->waiting = NULL; - pl08x_issue_pending(&waiting->chan); - break; - } - } - } - - spin_unlock_irqrestore(&plchan->lock, flags); - - if (txd) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = txd->tx.callback_param; - - /* Don't try to unmap buffers on slave channels */ - if (!plchan->slave) - pl08x_unmap_buffers(txd); - - /* Free the descriptor */ - spin_lock_irqsave(&plchan->lock, flags); - pl08x_free_txd(pl08x, txd); - spin_unlock_irqrestore(&plchan->lock, flags); - - /* Callback to signal completion */ - if (callback) - callback(callback_param); - } -} - static irqreturn_t pl08x_irq(int irq, void *dev) { struct pl08x_driver_data *pl08x = dev; @@ -1635,6 +1647,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev) /* Locate physical channel */ struct pl08x_phy_chan *phychan = &pl08x->phy_chans[i]; struct pl08x_dma_chan *plchan = phychan->serving; + struct pl08x_txd *tx; if (!plchan) { dev_err(&pl08x->adev->dev, @@ -1643,8 +1656,29 @@ static irqreturn_t pl08x_irq(int irq, void *dev) continue; } - /* Schedule tasklet on this channel */ - tasklet_schedule(&plchan->tasklet); + spin_lock(&plchan->vc.lock); + tx = plchan->at; + if (tx) { + plchan->at = NULL; + /* + * This descriptor is done, release its mux + * reservation. + */ + pl08x_release_mux(plchan); + tx->done = true; + vchan_cookie_complete(&tx->vd); + + /* + * And start the next descriptor (if any), + * otherwise free this channel. 
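Note the completion ordering in the interrupt handler above: the cookie is completed under vc.lock, but the client callback itself runs later from the virt-dma tasklet. A client that needs to block on the transfer can therefore signal from the callback (a sketch; error handling omitted):

	static void xfer_done(void *arg)
	{
		complete(arg);		/* runs in virt-dma tasklet context */
	}

	/* ... in the submitting context: */
	struct completion done;

	init_completion(&done);
	tx->callback = xfer_done;
	tx->callback_param = &done;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	wait_for_completion(&done);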
+ */ + if (vchan_next_desc(&plchan->vc)) + pl08x_start_next_txd(plchan); + else + pl08x_phy_free(plchan); + } + spin_unlock(&plchan->vc.lock); + mask |= (1 << i); } } @@ -1654,16 +1688,10 @@ static irqreturn_t pl08x_irq(int irq, void *dev) static void pl08x_dma_slave_init(struct pl08x_dma_chan *chan) { - u32 cctl = pl08x_cctl(chan->cd->cctl); - chan->slave = true; chan->name = chan->cd->bus_id; - chan->src_addr = chan->cd->addr; - chan->dst_addr = chan->cd->addr; - chan->src_cctl = cctl | PL080_CONTROL_DST_INCR | - pl08x_select_bus(chan->cd->periph_buses, chan->host->mem_buses); - chan->dst_cctl = cctl | PL080_CONTROL_SRC_INCR | - pl08x_select_bus(chan->host->mem_buses, chan->cd->periph_buses); + chan->cfg.src_addr = chan->cd->addr; + chan->cfg.dst_addr = chan->cd->addr; } /* @@ -1693,6 +1721,7 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, chan->host = pl08x; chan->state = PL08X_CHAN_IDLE; + chan->signal = -1; if (slave) { chan->cd = &pl08x->pd->slave_channels[i]; @@ -1705,26 +1734,12 @@ static int pl08x_dma_init_virtual_channels(struct pl08x_driver_data *pl08x, return -ENOMEM; } } - if (chan->cd->circular_buffer) { - dev_err(&pl08x->adev->dev, - "channel %s: circular buffers not supported\n", - chan->name); - kfree(chan); - continue; - } dev_dbg(&pl08x->adev->dev, "initialize virtual channel \"%s\"\n", chan->name); - chan->chan.device = dmadev; - dma_cookie_init(&chan->chan); - - spin_lock_init(&chan->lock); - INIT_LIST_HEAD(&chan->pend_list); - tasklet_init(&chan->tasklet, pl08x_tasklet, - (unsigned long) chan); - - list_add_tail(&chan->chan.device_node, &dmadev->channels); + chan->vc.desc_free = pl08x_desc_free; + vchan_init(&chan->vc, dmadev); } dev_info(&pl08x->adev->dev, "initialized %d virtual %s channels\n", i, slave ? 
"slave" : "memcpy"); @@ -1737,8 +1752,8 @@ static void pl08x_free_virtual_channels(struct dma_device *dmadev) struct pl08x_dma_chan *next; list_for_each_entry_safe(chan, - next, &dmadev->channels, chan.device_node) { - list_del(&chan->chan.device_node); + next, &dmadev->channels, vc.chan.device_node) { + list_del(&chan->vc.chan.device_node); kfree(chan); } } @@ -1791,7 +1806,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual memcpy channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->memcpy.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->memcpy.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1799,7 +1814,7 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data) seq_printf(s, "\nPL08x virtual slave channels:\n"); seq_printf(s, "CHANNEL:\tSTATE:\n"); seq_printf(s, "--------\t------\n"); - list_for_each_entry(chan, &pl08x->slave.channels, chan.device_node) { + list_for_each_entry(chan, &pl08x->slave.channels, vc.chan.device_node) { seq_printf(s, "%s\t\t%s\n", chan->name, pl08x_state_str(chan->state)); } @@ -1851,9 +1866,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_pl08x; } - pm_runtime_set_active(&adev->dev); - pm_runtime_enable(&adev->dev); - /* Initialize memcpy engine */ dma_cap_set(DMA_MEMCPY, pl08x->memcpy.cap_mask); pl08x->memcpy.dev = &adev->dev; @@ -1903,8 +1915,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) goto out_no_lli_pool; } - spin_lock_init(&pl08x->lock); - pl08x->base = ioremap(adev->res.start, resource_size(&adev->res)); if (!pl08x->base) { ret = -ENOMEM; @@ -1942,7 +1952,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ch->id = i; ch->base = pl08x->base + PL080_Cx_BASE(i); spin_lock_init(&ch->lock); - ch->signal = -1; /* * Nomadik variants can have channels that are locked @@ -2007,7 +2016,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) amba_part(adev), amba_rev(adev), (unsigned long long)adev->res.start, adev->irq[0]); - pm_runtime_put(&adev->dev); return 0; out_no_slave_reg: @@ -2026,9 +2034,6 @@ out_no_ioremap: dma_pool_destroy(pl08x->pool); out_no_lli_pool: out_no_platdata: - pm_runtime_put(&adev->dev); - pm_runtime_disable(&adev->dev); - kfree(pl08x); out_no_pl08x: amba_release_regions(adev); diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c new file mode 100644 index 000000000000..ae0561826137 --- /dev/null +++ b/drivers/dma/omap-dma.c @@ -0,0 +1,669 @@ +/* + * OMAP DMAengine support + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/omap-dma.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" +#include <plat/dma.h> + +struct omap_dmadev { + struct dma_device ddev; + spinlock_t lock; + struct tasklet_struct task; + struct list_head pending; +}; + +struct omap_chan { + struct virt_dma_chan vc; + struct list_head node; + + struct dma_slave_config cfg; + unsigned dma_sig; + bool cyclic; + + int dma_ch; + struct omap_desc *desc; + unsigned sgidx; +}; + +struct omap_sg { + dma_addr_t addr; + uint32_t en; /* number of elements (24-bit) */ + uint32_t fn; /* number of frames (16-bit) */ +}; + +struct omap_desc { + struct virt_dma_desc vd; + enum dma_transfer_direction dir; + dma_addr_t dev_addr; + + int16_t fi; /* for OMAP_DMA_SYNC_PACKET */ + uint8_t es; /* OMAP_DMA_DATA_TYPE_xxx */ + uint8_t sync_mode; /* OMAP_DMA_SYNC_xxx */ + uint8_t sync_type; /* OMAP_DMA_xxx_SYNC* */ + uint8_t periph_port; /* Peripheral port */ + + unsigned sglen; + struct omap_sg sg[0]; +}; + +static const unsigned es_bytes[] = { + [OMAP_DMA_DATA_TYPE_S8] = 1, + [OMAP_DMA_DATA_TYPE_S16] = 2, + [OMAP_DMA_DATA_TYPE_S32] = 4, +}; + +static inline struct omap_dmadev *to_omap_dma_dev(struct dma_device *d) +{ + return container_of(d, struct omap_dmadev, ddev); +} + +static inline struct omap_chan *to_omap_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct omap_chan, vc.chan); +} + +static inline struct omap_desc *to_omap_dma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct omap_desc, vd.tx); +} + +static void omap_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(container_of(vd, struct omap_desc, vd)); +} + +static void omap_dma_start_sg(struct omap_chan *c, struct omap_desc *d, + unsigned idx) +{ + struct omap_sg *sg = d->sg + idx; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_dest_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + else + omap_set_dma_src_params(c->dma_ch, OMAP_DMA_PORT_EMIFF, + OMAP_DMA_AMODE_POST_INC, sg->addr, 0, 0); + + omap_set_dma_transfer_params(c->dma_ch, d->es, sg->en, sg->fn, + d->sync_mode, c->dma_sig, d->sync_type); + + omap_start_dma(c->dma_ch); +} + +static void omap_dma_start_desc(struct omap_chan *c) +{ + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + struct omap_desc *d; + + if (!vd) { + c->desc = NULL; + return; + } + + list_del(&vd->node); + + c->desc = d = to_omap_dma_desc(&vd->tx); + c->sgidx = 0; + + if (d->dir == DMA_DEV_TO_MEM) + omap_set_dma_src_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + else + omap_set_dma_dest_params(c->dma_ch, d->periph_port, + OMAP_DMA_AMODE_CONSTANT, d->dev_addr, 0, d->fi); + + omap_dma_start_sg(c, d, 0); +} + +static void omap_dma_callback(int ch, u16 status, void *data) +{ + struct omap_chan *c = data; + struct omap_desc *d; + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + d = c->desc; + if (d) { + if (!c->cyclic) { + if (++c->sgidx < d->sglen) { + omap_dma_start_sg(c, d, c->sgidx); + } else { + omap_dma_start_desc(c); + vchan_cookie_complete(&d->vd); + } + } else { + vchan_cyclic_callback(&d->vd); + } + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +/* + * This callback schedules all pending channels. 
We could be more + * clever here by postponing allocation of the real DMA channels to + * this point, and freeing them when our virtual channel becomes idle. + * + * We would then need to deal with 'all channels in-use' + */ +static void omap_dma_sched(unsigned long data) +{ + struct omap_dmadev *d = (struct omap_dmadev *)data; + LIST_HEAD(head); + + spin_lock_irq(&d->lock); + list_splice_tail_init(&d->pending, &head); + spin_unlock_irq(&d->lock); + + while (!list_empty(&head)) { + struct omap_chan *c = list_first_entry(&head, + struct omap_chan, node); + + spin_lock_irq(&c->vc.lock); + list_del_init(&c->node); + omap_dma_start_desc(c); + spin_unlock_irq(&c->vc.lock); + } +} + +static int omap_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + dev_info(c->vc.chan.device->dev, "allocating channel for %u\n", c->dma_sig); + + return omap_request_dma(c->dma_sig, "DMA engine", + omap_dma_callback, c, &c->dma_ch); +} + +static void omap_dma_free_chan_resources(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + + vchan_free_chan_resources(&c->vc); + omap_free_dma(c->dma_ch); + + dev_info(c->vc.chan.device->dev, "freeing channel for %u\n", c->dma_sig); +} + +static size_t omap_dma_sg_size(struct omap_sg *sg) +{ + return sg->en * sg->fn; +} + +static size_t omap_dma_desc_size(struct omap_desc *d) +{ + unsigned i; + size_t size; + + for (size = i = 0; i < d->sglen; i++) + size += omap_dma_sg_size(&d->sg[i]); + + return size * es_bytes[d->es]; +} + +static size_t omap_dma_desc_size_pos(struct omap_desc *d, dma_addr_t addr) +{ + unsigned i; + size_t size, es_size = es_bytes[d->es]; + + for (size = i = 0; i < d->sglen; i++) { + size_t this_size = omap_dma_sg_size(&d->sg[i]) * es_size; + + if (size) + size += this_size; + else if (addr >= d->sg[i].addr && + addr < d->sg[i].addr + this_size) + size += d->sg[i].addr + this_size - addr; + } + return size; +} + +static enum dma_status omap_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + struct virt_dma_desc *vd; + enum dma_status ret; + unsigned long flags; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_SUCCESS || !txstate) + return ret; + + spin_lock_irqsave(&c->vc.lock, flags); + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + txstate->residue = omap_dma_desc_size(to_omap_dma_desc(&vd->tx)); + } else if (c->desc && c->desc->vd.tx.cookie == cookie) { + struct omap_desc *d = c->desc; + dma_addr_t pos; + + if (d->dir == DMA_MEM_TO_DEV) + pos = omap_get_dma_src_pos(c->dma_ch); + else if (d->dir == DMA_DEV_TO_MEM) + pos = omap_get_dma_dst_pos(c->dma_ch); + else + pos = 0; + + txstate->residue = omap_dma_desc_size_pos(d, pos); + } else { + txstate->residue = 0; + } + spin_unlock_irqrestore(&c->vc.lock, flags); + + return ret; +} + +static void omap_dma_issue_pending(struct dma_chan *chan) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc) && !c->desc) { + struct omap_dmadev *d = to_omap_dma_dev(chan->device); + spin_lock(&d->lock); + if (list_empty(&c->node)) + list_add_tail(&c->node, &d->pending); + spin_unlock(&d->lock); + tasklet_schedule(&d->task); + } + spin_unlock_irqrestore(&c->vc.lock, flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, unsigned sglen, + enum dma_transfer_direction dir, 
unsigned long tx_flags, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned i, j = 0, es, en, frame_bytes, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. */ + d = kzalloc(sizeof(*d) + sglen * sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_FRAME; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_TIPB; + + /* + * Build our scatterlist entries: each contains the address, + * the number of elements (EN) in each frame, and the number of + * frames (FN). Number of bytes for this entry = ES * EN * FN. + * + * Burst size translates to number of elements with frame sync. + * Note: DMA engine defines burst to be the number of dev-width + * transfers. + */ + en = burst; + frame_bytes = es_bytes[es] * en; + for_each_sg(sgl, sgent, sglen, i) { + d->sg[j].addr = sg_dma_address(sgent); + d->sg[j].en = en; + d->sg[j].fn = sg_dma_len(sgent) / frame_bytes; + j++; + } + + d->sglen = j; + + return vchan_tx_prep(&c->vc, &d->vd, tx_flags); +} + +static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, void *context) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + enum dma_slave_buswidth dev_width; + struct omap_desc *d; + dma_addr_t dev_addr; + unsigned es, sync_type; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_addr = c->cfg.src_addr; + dev_width = c->cfg.src_addr_width; + burst = c->cfg.src_maxburst; + sync_type = OMAP_DMA_SRC_SYNC; + } else if (dir == DMA_MEM_TO_DEV) { + dev_addr = c->cfg.dst_addr; + dev_width = c->cfg.dst_addr_width; + burst = c->cfg.dst_maxburst; + sync_type = OMAP_DMA_DST_SYNC; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + es = OMAP_DMA_DATA_TYPE_S8; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + es = OMAP_DMA_DATA_TYPE_S16; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + es = OMAP_DMA_DATA_TYPE_S32; + break; + default: /* not reached */ + return NULL; + } + + /* Now allocate and setup the descriptor. 
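The element/frame arithmetic above is easiest to see with concrete (hypothetical) numbers: a 32-bit peripheral register gives es = OMAP_DMA_DATA_TYPE_S32, and with maxburst = 8 each frame carries en elements:

	unsigned en = 8;			/* burst: elements per frame */
	unsigned frame_bytes = 4 * en;		/* es_bytes[S32] * en = 32 bytes */
	unsigned fn = 4096 / frame_bytes;	/* one 4 KiB sg entry -> 128 frames */
	/* bytes for the entry = ES * EN * FN = 4 * 8 * 128 = 4096, as expected */

The cyclic prepare below uses the same identity the other way around: en = period_len / es_bytes[es] and fn = buf_len / period_len.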
*/ + d = kzalloc(sizeof(*d) + sizeof(d->sg[0]), GFP_ATOMIC); + if (!d) + return NULL; + + d->dir = dir; + d->dev_addr = dev_addr; + d->fi = burst; + d->es = es; + d->sync_mode = OMAP_DMA_SYNC_PACKET; + d->sync_type = sync_type; + d->periph_port = OMAP_DMA_PORT_MPUI; + d->sg[0].addr = buf_addr; + d->sg[0].en = period_len / es_bytes[es]; + d->sg[0].fn = buf_len / period_len; + d->sglen = 1; + + if (!c->cyclic) { + c->cyclic = true; + omap_dma_link_lch(c->dma_ch, c->dma_ch); + omap_enable_dma_irq(c->dma_ch, OMAP_DMA_FRAME_IRQ); + omap_disable_dma_irq(c->dma_ch, OMAP_DMA_BLOCK_IRQ); + } + + if (!cpu_class_is_omap1()) { + omap_set_dma_src_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + omap_set_dma_dest_burst_mode(c->dma_ch, OMAP_DMA_DATA_BURST_16); + } + + return vchan_tx_prep(&c->vc, &d->vd, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); +} + +static int omap_dma_slave_config(struct omap_chan *c, struct dma_slave_config *cfg) +{ + if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES || + cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES) + return -EINVAL; + + memcpy(&c->cfg, cfg, sizeof(c->cfg)); + + return 0; +} + +static int omap_dma_terminate_all(struct omap_chan *c) +{ + struct omap_dmadev *d = to_omap_dma_dev(c->vc.chan.device); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&c->vc.lock, flags); + + /* Prevent this channel being scheduled */ + spin_lock(&d->lock); + list_del_init(&c->node); + spin_unlock(&d->lock); + + /* + * Stop DMA activity: we assume the callback will not be called + * after omap_stop_dma() returns (even if it does, it will see + * c->desc is NULL and exit.) + */ + if (c->desc) { + c->desc = NULL; + omap_stop_dma(c->dma_ch); + } + + if (c->cyclic) { + c->cyclic = false; + omap_dma_unlink_lch(c->dma_ch, c->dma_ch); + } + + vchan_get_all_descriptors(&c->vc, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + + return 0; +} + +static int omap_dma_pause(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_resume(struct omap_chan *c) +{ + /* FIXME: not supported by platform private API */ + return -EINVAL; +} + +static int omap_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, + unsigned long arg) +{ + struct omap_chan *c = to_omap_dma_chan(chan); + int ret; + + switch (cmd) { + case DMA_SLAVE_CONFIG: + ret = omap_dma_slave_config(c, (struct dma_slave_config *)arg); + break; + + case DMA_TERMINATE_ALL: + ret = omap_dma_terminate_all(c); + break; + + case DMA_PAUSE: + ret = omap_dma_pause(c); + break; + + case DMA_RESUME: + ret = omap_dma_resume(c); + break; + + default: + ret = -ENXIO; + break; + } + + return ret; +} + +static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig) +{ + struct omap_chan *c; + + c = kzalloc(sizeof(*c), GFP_KERNEL); + if (!c) + return -ENOMEM; + + c->dma_sig = dma_sig; + c->vc.desc_free = omap_dma_desc_free; + vchan_init(&c->vc, &od->ddev); + INIT_LIST_HEAD(&c->node); + + od->ddev.chancnt++; + + return 0; +} + +static void omap_dma_free(struct omap_dmadev *od) +{ + tasklet_kill(&od->task); + while (!list_empty(&od->ddev.channels)) { + struct omap_chan *c = list_first_entry(&od->ddev.channels, + struct omap_chan, vc.chan.device_node); + + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); + kfree(c); + } + kfree(od); +} + +static int omap_dma_probe(struct platform_device *pdev) +{ + struct omap_dmadev *od; + int rc, i; + + od = kzalloc(sizeof(*od), GFP_KERNEL); + if (!od) + return -ENOMEM; + + 
dma_cap_set(DMA_SLAVE, od->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask); + od->ddev.device_alloc_chan_resources = omap_dma_alloc_chan_resources; + od->ddev.device_free_chan_resources = omap_dma_free_chan_resources; + od->ddev.device_tx_status = omap_dma_tx_status; + od->ddev.device_issue_pending = omap_dma_issue_pending; + od->ddev.device_prep_slave_sg = omap_dma_prep_slave_sg; + od->ddev.device_prep_dma_cyclic = omap_dma_prep_dma_cyclic; + od->ddev.device_control = omap_dma_control; + od->ddev.dev = &pdev->dev; + INIT_LIST_HEAD(&od->ddev.channels); + INIT_LIST_HEAD(&od->pending); + spin_lock_init(&od->lock); + + tasklet_init(&od->task, omap_dma_sched, (unsigned long)od); + + for (i = 0; i < 127; i++) { + rc = omap_dma_chan_init(od, i); + if (rc) { + omap_dma_free(od); + return rc; + } + } + + rc = dma_async_device_register(&od->ddev); + if (rc) { + pr_warn("OMAP-DMA: failed to register slave DMA engine device: %d\n", + rc); + omap_dma_free(od); + } else { + platform_set_drvdata(pdev, od); + } + + dev_info(&pdev->dev, "OMAP DMA engine driver\n"); + + return rc; +} + +static int omap_dma_remove(struct platform_device *pdev) +{ + struct omap_dmadev *od = platform_get_drvdata(pdev); + + dma_async_device_unregister(&od->ddev); + omap_dma_free(od); + + return 0; +} + +static struct platform_driver omap_dma_driver = { + .probe = omap_dma_probe, + .remove = omap_dma_remove, + .driver = { + .name = "omap-dma-engine", + .owner = THIS_MODULE, + }, +}; + +bool omap_dma_filter_fn(struct dma_chan *chan, void *param) +{ + if (chan->device->dev->driver == &omap_dma_driver.driver) { + struct omap_chan *c = to_omap_dma_chan(chan); + unsigned req = *(unsigned *)param; + + return req == c->dma_sig; + } + return false; +} +EXPORT_SYMBOL_GPL(omap_dma_filter_fn); + +static struct platform_device *pdev; + +static const struct platform_device_info omap_dma_dev_info = { + .name = "omap-dma-engine", + .id = -1, + .dma_mask = DMA_BIT_MASK(32), +}; + +static int omap_dma_init(void) +{ + int rc = platform_driver_register(&omap_dma_driver); + + if (rc == 0) { + pdev = platform_device_register_full(&omap_dma_dev_info); + if (IS_ERR(pdev)) { + platform_driver_unregister(&omap_dma_driver); + rc = PTR_ERR(pdev); + } + } + return rc; +} +subsys_initcall(omap_dma_init); + +static void __exit omap_dma_exit(void) +{ + platform_device_unregister(pdev); + platform_driver_unregister(&omap_dma_driver); +} +module_exit(omap_dma_exit); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c index ec78ccef9132..f5a73606217e 100644 --- a/drivers/dma/sa11x0-dma.c +++ b/drivers/dma/sa11x0-dma.c @@ -21,6 +21,8 @@ #include <linux/slab.h> #include <linux/spinlock.h> +#include "virt-dma.h" + #define NR_PHY_CHAN 6 #define DMA_ALIGN 3 #define DMA_MAX_SIZE 0x1fff @@ -72,12 +74,13 @@ struct sa11x0_dma_sg { }; struct sa11x0_dma_desc { - struct dma_async_tx_descriptor tx; + struct virt_dma_desc vd; + u32 ddar; size_t size; + unsigned period; + bool cyclic; - /* maybe protected by c->lock */ - struct list_head node; unsigned sglen; struct sa11x0_dma_sg sg[0]; }; @@ -85,15 +88,11 @@ struct sa11x0_dma_desc { struct sa11x0_dma_phy; struct sa11x0_dma_chan { - struct dma_chan chan; - spinlock_t lock; - dma_cookie_t lc; + struct virt_dma_chan vc; - /* protected by c->lock */ + /* protected by c->vc.lock */ struct sa11x0_dma_phy *phy; enum dma_status status; - struct list_head desc_submitted; - struct list_head desc_issued; /* protected by d->lock */ struct list_head node; 
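omap_dma_filter_fn() above follows the standard dmaengine filter convention: the client passes the OMAP DMA request line, and the filter matches it against the channel's dma_sig. A typical request looks like this (the request number is a placeholder):

	dma_cap_mask_t mask;
	unsigned int req = 42;		/* hypothetical OMAP DMA request line */
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	chan = dma_request_channel(mask, omap_dma_filter_fn, &req);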
@@ -109,7 +108,7 @@ struct sa11x0_dma_phy { struct sa11x0_dma_chan *vchan; - /* Protected by c->lock */ + /* Protected by c->vc.lock */ unsigned sg_load; struct sa11x0_dma_desc *txd_load; unsigned sg_done; @@ -127,13 +126,12 @@ struct sa11x0_dma_dev { spinlock_t lock; struct tasklet_struct task; struct list_head chan_pending; - struct list_head desc_complete; struct sa11x0_dma_phy phy[NR_PHY_CHAN]; }; static struct sa11x0_dma_chan *to_sa11x0_dma_chan(struct dma_chan *chan) { - return container_of(chan, struct sa11x0_dma_chan, chan); + return container_of(chan, struct sa11x0_dma_chan, vc.chan); } static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) @@ -141,27 +139,26 @@ static struct sa11x0_dma_dev *to_sa11x0_dma(struct dma_device *dmadev) return container_of(dmadev, struct sa11x0_dma_dev, slave); } -static struct sa11x0_dma_desc *to_sa11x0_dma_tx(struct dma_async_tx_descriptor *tx) +static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) { - return container_of(tx, struct sa11x0_dma_desc, tx); + struct virt_dma_desc *vd = vchan_next_desc(&c->vc); + + return vd ? container_of(vd, struct sa11x0_dma_desc, vd) : NULL; } -static struct sa11x0_dma_desc *sa11x0_dma_next_desc(struct sa11x0_dma_chan *c) +static void sa11x0_dma_free_desc(struct virt_dma_desc *vd) { - if (list_empty(&c->desc_issued)) - return NULL; - - return list_first_entry(&c->desc_issued, struct sa11x0_dma_desc, node); + kfree(container_of(vd, struct sa11x0_dma_desc, vd)); } static void sa11x0_dma_start_desc(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd) { - list_del(&txd->node); + list_del(&txd->vd.node); p->txd_load = txd; p->sg_load = 0; dev_vdbg(p->dev->slave.dev, "pchan %u: txd %p[%x]: starting: DDAR:%x\n", - p->num, txd, txd->tx.cookie, txd->ddar); + p->num, &txd->vd, txd->vd.tx.cookie, txd->ddar); } static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, @@ -183,19 +180,24 @@ static void noinline sa11x0_dma_start_sg(struct sa11x0_dma_phy *p, return; if (p->sg_load == txd->sglen) { - struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); + if (!txd->cyclic) { + struct sa11x0_dma_desc *txn = sa11x0_dma_next_desc(c); - /* - * We have reached the end of the current descriptor. - * Peek at the next descriptor, and if compatible with - * the current, start processing it. - */ - if (txn && txn->ddar == txd->ddar) { - txd = txn; - sa11x0_dma_start_desc(p, txn); + /* + * We have reached the end of the current descriptor. + * Peek at the next descriptor, and if compatible with + * the current, start processing it. 
+ */ + if (txn && txn->ddar == txd->ddar) { + txd = txn; + sa11x0_dma_start_desc(p, txn); + } else { + p->txd_load = NULL; + return; + } } else { - p->txd_load = NULL; - return; + /* Cyclic: reset back to beginning */ + p->sg_load = 0; } } @@ -229,21 +231,21 @@ static void noinline sa11x0_dma_complete(struct sa11x0_dma_phy *p, struct sa11x0_dma_desc *txd = p->txd_done; if (++p->sg_done == txd->sglen) { - struct sa11x0_dma_dev *d = p->dev; - - dev_vdbg(d->slave.dev, "pchan %u: txd %p[%x]: completed\n", - p->num, p->txd_done, p->txd_done->tx.cookie); - - c->lc = txd->tx.cookie; + if (!txd->cyclic) { + vchan_cookie_complete(&txd->vd); - spin_lock(&d->lock); - list_add_tail(&txd->node, &d->desc_complete); - spin_unlock(&d->lock); + p->sg_done = 0; + p->txd_done = p->txd_load; - p->sg_done = 0; - p->txd_done = p->txd_load; + if (!p->txd_done) + tasklet_schedule(&p->dev->task); + } else { + if ((p->sg_done % txd->period) == 0) + vchan_cyclic_callback(&txd->vd); - tasklet_schedule(&d->task); + /* Cyclic: reset back to beginning */ + p->sg_done = 0; + } } sa11x0_dma_start_sg(p, c); @@ -280,7 +282,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (c) { unsigned long flags; - spin_lock_irqsave(&c->lock, flags); + spin_lock_irqsave(&c->vc.lock, flags); /* * Now that we're holding the lock, check that the vchan * really is associated with this pchan before touching the @@ -294,7 +296,7 @@ static irqreturn_t sa11x0_dma_irq(int irq, void *dev_id) if (dcsr & DCSR_DONEB) sa11x0_dma_complete(p, c); } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); } return IRQ_HANDLED; @@ -332,28 +334,15 @@ static void sa11x0_dma_tasklet(unsigned long arg) struct sa11x0_dma_dev *d = (struct sa11x0_dma_dev *)arg; struct sa11x0_dma_phy *p; struct sa11x0_dma_chan *c; - struct sa11x0_dma_desc *txd, *txn; - LIST_HEAD(head); unsigned pch, pch_alloc = 0; dev_dbg(d->slave.dev, "tasklet enter\n"); - /* Get the completed tx descriptors */ - spin_lock_irq(&d->lock); - list_splice_init(&d->desc_complete, &head); - spin_unlock_irq(&d->lock); - - list_for_each_entry(txd, &head, node) { - c = to_sa11x0_dma_chan(txd->tx.chan); - - dev_dbg(d->slave.dev, "vchan %p: txd %p[%x] completed\n", - c, txd, txd->tx.cookie); - - spin_lock_irq(&c->lock); + list_for_each_entry(c, &d->slave.channels, vc.chan.device_node) { + spin_lock_irq(&c->vc.lock); p = c->phy; - if (p) { - if (!p->txd_done) - sa11x0_dma_start_txd(c); + if (p && !p->txd_done) { + sa11x0_dma_start_txd(c); if (!p->txd_done) { /* No current txd associated with this channel */ dev_dbg(d->slave.dev, "pchan %u: free\n", p->num); @@ -363,7 +352,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) p->vchan = NULL; } } - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } spin_lock_irq(&d->lock); @@ -380,7 +369,7 @@ static void sa11x0_dma_tasklet(unsigned long arg) /* Mark this channel allocated */ p->vchan = c; - dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, c); + dev_dbg(d->slave.dev, "pchan %u: alloc vchan %p\n", pch, &c->vc); } } spin_unlock_irq(&d->lock); @@ -390,42 +379,18 @@ static void sa11x0_dma_tasklet(unsigned long arg) p = &d->phy[pch]; c = p->vchan; - spin_lock_irq(&c->lock); + spin_lock_irq(&c->vc.lock); c->phy = p; sa11x0_dma_start_txd(c); - spin_unlock_irq(&c->lock); + spin_unlock_irq(&c->vc.lock); } } - /* Now free the completed tx descriptor, and call their callbacks */ - list_for_each_entry_safe(txd, txn, &head, node) { - dma_async_tx_callback callback = txd->tx.callback; - void *callback_param = 
txd->tx.callback_param; - - dev_dbg(d->slave.dev, "txd %p[%x]: callback and free\n", - txd, txd->tx.cookie); - - kfree(txd); - - if (callback) - callback(callback_param); - } - dev_dbg(d->slave.dev, "tasklet exit\n"); } -static void sa11x0_dma_desc_free(struct sa11x0_dma_dev *d, struct list_head *head) -{ - struct sa11x0_dma_desc *txd, *txn; - - list_for_each_entry_safe(txd, txn, head, node) { - dev_dbg(d->slave.dev, "txd %p: freeing\n", txd); - kfree(txd); - } -} - static int sa11x0_dma_alloc_chan_resources(struct dma_chan *chan) { return 0; @@ -436,18 +401,12 @@ static void sa11x0_dma_free_chan_resources(struct dma_chan *chan) struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - LIST_HEAD(head); - spin_lock_irqsave(&c->lock, flags); - spin_lock(&d->lock); + spin_lock_irqsave(&d->lock, flags); list_del_init(&c->node); - spin_unlock(&d->lock); - - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&d->lock, flags); - sa11x0_dma_desc_free(d, &head); + vchan_free_chan_resources(&c->vc); } static dma_addr_t sa11x0_dma_pos(struct sa11x0_dma_phy *p) @@ -472,33 +431,47 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); struct sa11x0_dma_phy *p; - struct sa11x0_dma_desc *txd; - dma_cookie_t last_used, last_complete; + struct virt_dma_desc *vd; unsigned long flags; enum dma_status ret; - size_t bytes = 0; - - last_used = c->chan.cookie; - last_complete = c->lc; - ret = dma_async_is_complete(cookie, last_complete, last_used); - if (ret == DMA_SUCCESS) { - dma_set_tx_state(state, last_complete, last_used, 0); + ret = dma_cookie_status(&c->vc.chan, cookie, state); + if (ret == DMA_SUCCESS) return ret; - } - spin_lock_irqsave(&c->lock, flags); + if (!state) + return c->status; + + spin_lock_irqsave(&c->vc.lock, flags); p = c->phy; - ret = c->status; - if (p) { - dma_addr_t addr = sa11x0_dma_pos(p); - dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + /* + * If the cookie is on our issue queue, then the residue is + * its total size. 
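The residue computed here reaches clients through the generic status call; a sketch of the query side:

	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status != DMA_SUCCESS)
		pr_debug("transfer pending, residue %u bytes\n", state.residue);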
+ */ + vd = vchan_find_desc(&c->vc, cookie); + if (vd) { + state->residue = container_of(vd, struct sa11x0_dma_desc, vd)->size; + } else if (!p) { + state->residue = 0; + } else { + struct sa11x0_dma_desc *txd; + size_t bytes = 0; - txd = p->txd_done; + if (p->txd_done && p->txd_done->vd.tx.cookie == cookie) + txd = p->txd_done; + else if (p->txd_load && p->txd_load->vd.tx.cookie == cookie) + txd = p->txd_load; + else + txd = NULL; + + ret = c->status; if (txd) { + dma_addr_t addr = sa11x0_dma_pos(p); unsigned i; + dev_vdbg(d->slave.dev, "tx_status: addr:%x\n", addr); + for (i = 0; i < txd->sglen; i++) { dev_vdbg(d->slave.dev, "tx_status: [%u] %x+%x\n", i, txd->sg[i].addr, txd->sg[i].len); @@ -521,17 +494,11 @@ static enum dma_status sa11x0_dma_tx_status(struct dma_chan *chan, bytes += txd->sg[i].len; } } - if (txd != p->txd_load && p->txd_load) - bytes += p->txd_load->size; - } - list_for_each_entry(txd, &c->desc_issued, node) { - bytes += txd->size; + state->residue = bytes; } - spin_unlock_irqrestore(&c->lock, flags); - - dma_set_tx_state(state, last_complete, last_used, bytes); + spin_unlock_irqrestore(&c->vc.lock, flags); - dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", bytes); + dev_vdbg(d->slave.dev, "tx_status: bytes 0x%zx\n", state->residue); return ret; } @@ -547,40 +514,20 @@ static void sa11x0_dma_issue_pending(struct dma_chan *chan) struct sa11x0_dma_dev *d = to_sa11x0_dma(chan->device); unsigned long flags; - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &c->desc_issued); - if (!list_empty(&c->desc_issued)) { - spin_lock(&d->lock); - if (!c->phy && list_empty(&c->node)) { - list_add_tail(&c->node, &d->chan_pending); - tasklet_schedule(&d->task); - dev_dbg(d->slave.dev, "vchan %p: issued\n", c); + spin_lock_irqsave(&c->vc.lock, flags); + if (vchan_issue_pending(&c->vc)) { + if (!c->phy) { + spin_lock(&d->lock); + if (list_empty(&c->node)) { + list_add_tail(&c->node, &d->chan_pending); + tasklet_schedule(&d->task); + dev_dbg(d->slave.dev, "vchan %p: issued\n", &c->vc); + } + spin_unlock(&d->lock); } - spin_unlock(&d->lock); } else - dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", c); - spin_unlock_irqrestore(&c->lock, flags); -} - -static dma_cookie_t sa11x0_dma_tx_submit(struct dma_async_tx_descriptor *tx) -{ - struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(tx->chan); - struct sa11x0_dma_desc *txd = to_sa11x0_dma_tx(tx); - unsigned long flags; - - spin_lock_irqsave(&c->lock, flags); - c->chan.cookie += 1; - if (c->chan.cookie < 0) - c->chan.cookie = 1; - txd->tx.cookie = c->chan.cookie; - - list_add_tail(&txd->node, &c->desc_submitted); - spin_unlock_irqrestore(&c->lock, flags); - - dev_dbg(tx->chan->device->dev, "vchan %p: txd %p[%x]: submitted\n", - c, txd, txd->tx.cookie); - - return txd->tx.cookie; + dev_dbg(d->slave.dev, "vchan %p: nothing to issue\n", &c->vc); + spin_unlock_irqrestore(&c->vc.lock, flags); } static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( @@ -596,7 +543,7 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( /* SA11x0 channels can only operate in their native direction */ if (dir != (c->ddar & DDAR_RW ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", - c, c->ddar, dir); + &c->vc, c->ddar, dir); return NULL; } @@ -612,14 +559,14 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( j += DIV_ROUND_UP(len, DMA_MAX_SIZE & ~DMA_ALIGN) - 1; if (addr & DMA_ALIGN) { dev_dbg(chan->device->dev, "vchan %p: bad buffer alignment: %08x\n", - c, addr); + &c->vc, addr); return NULL; } } txd = kzalloc(sizeof(*txd) + j * sizeof(txd->sg[0]), GFP_ATOMIC); if (!txd) { - dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", c); + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); return NULL; } @@ -655,17 +602,73 @@ static struct dma_async_tx_descriptor *sa11x0_dma_prep_slave_sg( } while (len); } - dma_async_tx_descriptor_init(&txd->tx, &c->chan); - txd->tx.flags = flags; - txd->tx.tx_submit = sa11x0_dma_tx_submit; txd->ddar = c->ddar; txd->size = size; txd->sglen = j; dev_dbg(chan->device->dev, "vchan %p: txd %p: size %u nr %u\n", - c, txd, txd->size, txd->sglen); + &c->vc, &txd->vd, txd->size, txd->sglen); - return &txd->tx; + return vchan_tx_prep(&c->vc, &txd->vd, flags); +} + +static struct dma_async_tx_descriptor *sa11x0_dma_prep_dma_cyclic( + struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period, + enum dma_transfer_direction dir, void *context) +{ + struct sa11x0_dma_chan *c = to_sa11x0_dma_chan(chan); + struct sa11x0_dma_desc *txd; + unsigned i, j, k, sglen, sgperiod; + + /* SA11x0 channels can only operate in their native direction */ + if (dir != (c->ddar & DDAR_RW ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV)) { + dev_err(chan->device->dev, "vchan %p: bad DMA direction: DDAR:%08x dir:%u\n", + &c->vc, c->ddar, dir); + return NULL; + } + + sgperiod = DIV_ROUND_UP(period, DMA_MAX_SIZE & ~DMA_ALIGN); + sglen = size * sgperiod / period; + + /* Do not allow zero-sized txds */ + if (sglen == 0) + return NULL; + + txd = kzalloc(sizeof(*txd) + sglen * sizeof(txd->sg[0]), GFP_ATOMIC); + if (!txd) { + dev_dbg(chan->device->dev, "vchan %p: kzalloc failed\n", &c->vc); + return NULL; + } + + for (i = k = 0; i < size / period; i++) { + size_t tlen, len = period; + + for (j = 0; j < sgperiod; j++, k++) { + tlen = len; + + if (tlen > DMA_MAX_SIZE) { + unsigned mult = DIV_ROUND_UP(tlen, DMA_MAX_SIZE & ~DMA_ALIGN); + tlen = (tlen / mult) & ~DMA_ALIGN; + } + + txd->sg[k].addr = addr; + txd->sg[k].len = tlen; + addr += tlen; + len -= tlen; + } + + WARN_ON(len != 0); + } + + WARN_ON(k != sglen); + + txd->ddar = c->ddar; + txd->size = size; + txd->sglen = sglen; + txd->cyclic = 1; + txd->period = sgperiod; + + return vchan_tx_prep(&c->vc, &txd->vd, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_config *cfg) @@ -695,8 +698,8 @@ static int sa11x0_dma_slave_config(struct sa11x0_dma_chan *c, struct dma_slave_c if (maxburst == 8) ddar |= DDAR_BS; - dev_dbg(c->chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", - c, addr, width, maxburst); + dev_dbg(c->vc.chan.device->dev, "vchan %p: dma_slave_config addr %x width %u burst %u\n", + &c->vc, addr, width, maxburst); c->ddar = ddar | (addr & 0xf0000000) | (addr & 0x003ffffc) << 6; @@ -718,16 +721,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, return sa11x0_dma_slave_config(c, (struct dma_slave_config *)arg); case DMA_TERMINATE_ALL: - dev_dbg(d->slave.dev, "vchan %p: terminate all\n", c); + dev_dbg(d->slave.dev, "vchan %p: terminate all\n", &c->vc); /* Clear 
the tx descriptor lists */ - spin_lock_irqsave(&c->lock, flags); - list_splice_tail_init(&c->desc_submitted, &head); - list_splice_tail_init(&c->desc_issued, &head); + spin_lock_irqsave(&c->vc.lock, flags); + vchan_get_all_descriptors(&c->vc, &head); p = c->phy; if (p) { - struct sa11x0_dma_desc *txd, *txn; - dev_dbg(d->slave.dev, "pchan %u: terminating\n", p->num); /* vchan is assigned to a pchan - stop the channel */ writel(DCSR_RUN | DCSR_IE | @@ -735,17 +735,13 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, DCSR_STRTB | DCSR_DONEB, p->base + DMA_DCSR_C); - list_for_each_entry_safe(txd, txn, &d->desc_complete, node) - if (txd->tx.chan == &c->chan) - list_move(&txd->node, &head); - if (p->txd_load) { if (p->txd_load != p->txd_done) - list_add_tail(&p->txd_load->node, &head); + list_add_tail(&p->txd_load->vd.node, &head); p->txd_load = NULL; } if (p->txd_done) { - list_add_tail(&p->txd_done->node, &head); + list_add_tail(&p->txd_done->vd.node, &head); p->txd_done = NULL; } c->phy = NULL; @@ -754,14 +750,14 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); tasklet_schedule(&d->task); } - spin_unlock_irqrestore(&c->lock, flags); - sa11x0_dma_desc_free(d, &head); + spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); ret = 0; break; case DMA_PAUSE: - dev_dbg(d->slave.dev, "vchan %p: pause\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: pause\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_IN_PROGRESS) { c->status = DMA_PAUSED; @@ -774,26 +770,26 @@ static int sa11x0_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; case DMA_RESUME: - dev_dbg(d->slave.dev, "vchan %p: resume\n", c); - spin_lock_irqsave(&c->lock, flags); + dev_dbg(d->slave.dev, "vchan %p: resume\n", &c->vc); + spin_lock_irqsave(&c->vc.lock, flags); if (c->status == DMA_PAUSED) { c->status = DMA_IN_PROGRESS; p = c->phy; if (p) { writel(DCSR_RUN | DCSR_IE, p->base + DMA_DCSR_S); - } else if (!list_empty(&c->desc_issued)) { + } else if (!list_empty(&c->vc.desc_issued)) { spin_lock(&d->lock); list_add_tail(&c->node, &d->chan_pending); spin_unlock(&d->lock); } } - spin_unlock_irqrestore(&c->lock, flags); + spin_unlock_irqrestore(&c->vc.lock, flags); ret = 0; break; @@ -853,15 +849,13 @@ static int __devinit sa11x0_dma_init_dmadev(struct dma_device *dmadev, return -ENOMEM; } - c->chan.device = dmadev; c->status = DMA_IN_PROGRESS; c->ddar = chan_desc[i].ddar; c->name = chan_desc[i].name; - spin_lock_init(&c->lock); - INIT_LIST_HEAD(&c->desc_submitted); - INIT_LIST_HEAD(&c->desc_issued); INIT_LIST_HEAD(&c->node); - list_add_tail(&c->chan.device_node, &dmadev->channels); + + c->vc.desc_free = sa11x0_dma_free_desc; + vchan_init(&c->vc, dmadev); } return dma_async_device_register(dmadev); @@ -890,8 +884,9 @@ static void sa11x0_dma_free_channels(struct dma_device *dmadev) { struct sa11x0_dma_chan *c, *cn; - list_for_each_entry_safe(c, cn, &dmadev->channels, chan.device_node) { - list_del(&c->chan.device_node); + list_for_each_entry_safe(c, cn, &dmadev->channels, vc.chan.device_node) { + list_del(&c->vc.chan.device_node); + tasklet_kill(&c->vc.task); kfree(c); } } @@ -915,7 +910,6 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) spin_lock_init(&d->lock); INIT_LIST_HEAD(&d->chan_pending); - 
INIT_LIST_HEAD(&d->desc_complete); d->base = ioremap(res->start, resource_size(res)); if (!d->base) { @@ -947,7 +941,9 @@ static int __devinit sa11x0_dma_probe(struct platform_device *pdev) } dma_cap_set(DMA_SLAVE, d->slave.cap_mask); + dma_cap_set(DMA_CYCLIC, d->slave.cap_mask); d->slave.device_prep_slave_sg = sa11x0_dma_prep_slave_sg; + d->slave.device_prep_dma_cyclic = sa11x0_dma_prep_dma_cyclic; ret = sa11x0_dma_init_dmadev(&d->slave, &pdev->dev); if (ret) { dev_warn(d->slave.dev, "failed to register slave async device: %d\n", diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c new file mode 100644 index 000000000000..6f80432a3f0a --- /dev/null +++ b/drivers/dma/virt-dma.c @@ -0,0 +1,123 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/module.h> +#include <linux/spinlock.h> + +#include "virt-dma.h" + +static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) +{ + return container_of(tx, struct virt_dma_desc, tx); +} + +dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct virt_dma_chan *vc = to_virt_chan(tx->chan); + struct virt_dma_desc *vd = to_virt_desc(tx); + unsigned long flags; + dma_cookie_t cookie; + + spin_lock_irqsave(&vc->lock, flags); + cookie = dma_cookie_assign(tx); + + list_add_tail(&vd->node, &vc->desc_submitted); + spin_unlock_irqrestore(&vc->lock, flags); + + dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", + vc, vd, cookie); + + return cookie; +} +EXPORT_SYMBOL_GPL(vchan_tx_submit); + +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc, + dma_cookie_t cookie) +{ + struct virt_dma_desc *vd; + + list_for_each_entry(vd, &vc->desc_issued, node) + if (vd->tx.cookie == cookie) + return vd; + + return NULL; +} +EXPORT_SYMBOL_GPL(vchan_find_desc); + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. 
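The tasklet treats cyclic descriptors specially: vc->cyclic latches the descriptor whose period callback is due, and if several periods elapse before the tasklet runs, the callbacks coalesce into one. On the driver side the split looks roughly like this (a sketch, called with vc.lock held in the IRQ path):

	if (txd->cyclic)
		vchan_cyclic_callback(&txd->vd);	/* one callback per period */
	else
		vchan_cookie_complete(&txd->vd);	/* retire the cookie */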
+ */ +static void vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd; + dma_async_tx_callback cb = NULL; + void *cb_data = NULL; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + } + spin_unlock_irq(&vc->lock); + + if (cb) + cb(cb_data); + + while (!list_empty(&head)) { + vd = list_first_entry(&head, struct virt_dma_desc, node); + cb = vd->tx.callback; + cb_data = vd->tx.callback_param; + + list_del(&vd->node); + + vc->desc_free(vd); + + if (cb) + cb(cb_data); + } +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) +{ + while (!list_empty(head)) { + struct virt_dma_desc *vd = list_first_entry(head, + struct virt_dma_desc, node); + list_del(&vd->node); + dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); + vc->desc_free(vd); + } +} +EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); + +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) +{ + dma_cookie_init(&vc->chan); + + spin_lock_init(&vc->lock); + INIT_LIST_HEAD(&vc->desc_submitted); + INIT_LIST_HEAD(&vc->desc_issued); + INIT_LIST_HEAD(&vc->desc_completed); + + tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); + + vc->chan.device = dmadev; + list_add_tail(&vc->chan.device_node, &dmadev->channels); +} +EXPORT_SYMBOL_GPL(vchan_init); + +MODULE_AUTHOR("Russell King"); +MODULE_LICENSE("GPL"); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h new file mode 100644 index 000000000000..85c19d63f9fb --- /dev/null +++ b/drivers/dma/virt-dma.h @@ -0,0 +1,152 @@ +/* + * Virtual DMA channel support for DMAengine + * + * Copyright (C) 2012 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
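A driver adopts this layer by embedding the two structures and supplying a desc_free hook; a minimal sketch modeled on the pl08x and sa11x0 conversions above (the foo_* names are hypothetical):

	struct foo_desc {
		struct virt_dma_desc vd;	/* embedded so container_of() works */
		/* driver-private state ... */
	};

	struct foo_chan {
		struct virt_dma_chan vc;
		/* driver-private state ... */
	};

	static void foo_desc_free(struct virt_dma_desc *vd)
	{
		kfree(container_of(vd, struct foo_desc, vd));
	}

	/* at channel creation: */
	fc->vc.desc_free = foo_desc_free;
	vchan_init(&fc->vc, dmadev);

	/* in a prep routine, instead of open-coding tx_submit: */
	return vchan_tx_prep(&fc->vc, &fd->vd, flags);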
+ */ +#ifndef VIRT_DMA_H +#define VIRT_DMA_H + +#include <linux/dmaengine.h> +#include <linux/interrupt.h> + +#include "dmaengine.h" + +struct virt_dma_desc { + struct dma_async_tx_descriptor tx; + /* protected by vc.lock */ + struct list_head node; +}; + +struct virt_dma_chan { + struct dma_chan chan; + struct tasklet_struct task; + void (*desc_free)(struct virt_dma_desc *); + + spinlock_t lock; + + /* protected by vc.lock */ + struct list_head desc_submitted; + struct list_head desc_issued; + struct list_head desc_completed; + + struct virt_dma_desc *cyclic; +}; + +static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) +{ + return container_of(chan, struct virt_dma_chan, chan); +} + +void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head); +void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev); +struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); + +/** + * vchan_tx_prep - prepare a descriptor + * vc: virtual channel allocating this descriptor + * vd: virtual descriptor to prepare + * tx_flags: flags argument passed in to prepare function + */ +static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, unsigned long tx_flags) +{ + extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); + + dma_async_tx_descriptor_init(&vd->tx, &vc->chan); + vd->tx.flags = tx_flags; + vd->tx.tx_submit = vchan_tx_submit; + + return &vd->tx; +} + +/** + * vchan_issue_pending - move submitted descriptors to issued list + * vc: virtual channel to update + * + * vc.lock must be held by caller + */ +static inline bool vchan_issue_pending(struct virt_dma_chan *vc) +{ + list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); + return !list_empty(&vc->desc_issued); +} + +/** + * vchan_cookie_complete - report completion of a descriptor + * vd: virtual descriptor to update + * + * vc.lock must be held by caller + */ +static inline void vchan_cookie_complete(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + dma_cookie_complete(&vd->tx); + dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", + vd, vd->tx.cookie); + list_add_tail(&vd->node, &vc->desc_completed); + + tasklet_schedule(&vc->task); +} + +/** + * vchan_cyclic_callback - report the completion of a period + * vd: virtual descriptor + */ +static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) +{ + struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); + + vc->cyclic = vd; + tasklet_schedule(&vc->task); +} + +/** + * vchan_next_desc - peek at the next descriptor to be processed + * vc: virtual channel to obtain descriptor from + * + * vc.lock must be held by caller + */ +static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) +{ + if (list_empty(&vc->desc_issued)) + return NULL; + + return list_first_entry(&vc->desc_issued, struct virt_dma_desc, node); +} + +/** + * vchan_get_all_descriptors - obtain all submitted and issued descriptors + * vc: virtual channel to get descriptors from + * head: list of descriptors found + * + * vc.lock must be held by caller + * + * Removes all submitted and issued descriptors from internal lists, and + * provides a list of all descriptors found + */ +static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, + struct list_head *head) +{ + list_splice_tail_init(&vc->desc_submitted, head); + list_splice_tail_init(&vc->desc_issued, head); + list_splice_tail_init(&vc->desc_completed, 
head); +} + +static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) +{ + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&vc->lock, flags); + vchan_get_all_descriptors(vc, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); +} + +#endif diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 1eee45b69b71..d949b781f6f8 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -268,13 +268,14 @@ config DM_MIRROR needed for live data migration tools such as 'pvmove'. config DM_RAID - tristate "RAID 1/4/5/6 target" + tristate "RAID 1/4/5/6/10 target" depends on BLK_DEV_DM select MD_RAID1 + select MD_RAID10 select MD_RAID456 select BLK_DEV_MD ---help--- - A dm target that supports RAID1, RAID4, RAID5 and RAID6 mappings + A dm target that supports RAID1, RAID10, RAID4, RAID5 and RAID6 mappings A RAID-5 set of N drives with a capacity of C MB per drive provides the capacity of C * (N - 1) MB, and protects against a failure diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index 15dbe03117e4..94e7f6ba2e11 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -1305,7 +1305,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect prepare_to_wait(&bitmap->overflow_wait, &__wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(&bitmap->counts.lock); - io_schedule(); + schedule(); finish_wait(&bitmap->overflow_wait, &__wait); continue; } diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index f2f29c526544..982e3e390c45 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -11,6 +11,7 @@ #include "md.h" #include "raid1.h" #include "raid5.h" +#include "raid10.h" #include "bitmap.h" #include <linux/device-mapper.h> @@ -52,7 +53,10 @@ struct raid_dev { #define DMPF_MAX_RECOVERY_RATE 0x20 #define DMPF_MAX_WRITE_BEHIND 0x40 #define DMPF_STRIPE_CACHE 0x80 -#define DMPF_REGION_SIZE 0X100 +#define DMPF_REGION_SIZE 0x100 +#define DMPF_RAID10_COPIES 0x200 +#define DMPF_RAID10_FORMAT 0x400 + struct raid_set { struct dm_target *ti; @@ -76,6 +80,7 @@ static struct raid_type { const unsigned algorithm; /* RAID algorithm. */ } raid_types[] = { {"raid1", "RAID1 (mirroring)", 0, 2, 1, 0 /* NONE */}, + {"raid10", "RAID10 (striped mirrors)", 0, 2, 10, UINT_MAX /* Varies */}, {"raid4", "RAID4 (dedicated parity disk)", 1, 2, 5, ALGORITHM_PARITY_0}, {"raid5_la", "RAID5 (left asymmetric)", 1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC}, {"raid5_ra", "RAID5 (right asymmetric)", 1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC}, @@ -86,6 +91,17 @@ static struct raid_type { {"raid6_nc", "RAID6 (N continue)", 2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE} }; +static unsigned raid10_md_layout_to_copies(int layout) +{ + return layout & 0xFF; +} + +static int raid10_format_to_md_layout(char *format, unsigned copies) +{ + /* 1 "far" copy, and 'copies' "near" copies */ + return (1 << 8) | (copies & 0xFF); +} + static struct raid_type *get_raid_type(char *name) { int i; @@ -339,10 +355,16 @@ static int validate_region_size(struct raid_set *rs, unsigned long region_size) * [max_write_behind <sectors>] See '-write-behind=' (man mdadm) * [stripe_cache <sectors>] Stripe cache size for higher RAIDs * [region_size <sectors>] Defines granularity of bitmap + * + * RAID10-only options: + * [raid10_copies <# copies>] Number of copies. (Default: 2) + * [raid10_format <near>] Layout algorithm. 
(Default: near) */ static int parse_raid_params(struct raid_set *rs, char **argv, unsigned num_raid_params) { + char *raid10_format = "near"; + unsigned raid10_copies = 2; unsigned i, rebuild_cnt = 0; unsigned long value, region_size = 0; sector_t sectors_per_dev = rs->ti->len; @@ -416,11 +438,28 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } key = argv[i++]; + + /* Parameters that take a string value are checked here. */ + if (!strcasecmp(key, "raid10_format")) { + if (rs->raid_type->level != 10) { + rs->ti->error = "'raid10_format' is an invalid parameter for this RAID type"; + return -EINVAL; + } + if (strcmp("near", argv[i])) { + rs->ti->error = "Invalid 'raid10_format' value given"; + return -EINVAL; + } + raid10_format = argv[i]; + rs->print_flags |= DMPF_RAID10_FORMAT; + continue; + } + if (strict_strtoul(argv[i], 10, &value) < 0) { rs->ti->error = "Bad numerical argument given in raid params"; return -EINVAL; } + /* Parameters that take a numeric value are checked here */ if (!strcasecmp(key, "rebuild")) { rebuild_cnt++; @@ -439,6 +478,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, return -EINVAL; } break; + case 10: default: DMERR("The rebuild parameter is not supported for %s", rs->raid_type->name); rs->ti->error = "Rebuild not supported for this RAID type"; @@ -495,7 +535,8 @@ static int parse_raid_params(struct raid_set *rs, char **argv, */ value /= 2; - if (rs->raid_type->level < 5) { + if ((rs->raid_type->level != 5) && + (rs->raid_type->level != 6)) { rs->ti->error = "Inappropriate argument: stripe_cache"; return -EINVAL; } @@ -520,6 +561,14 @@ static int parse_raid_params(struct raid_set *rs, char **argv, } else if (!strcasecmp(key, "region_size")) { rs->print_flags |= DMPF_REGION_SIZE; region_size = value; + } else if (!strcasecmp(key, "raid10_copies") && + (rs->raid_type->level == 10)) { + if ((value < 2) || (value > 0xFF)) { + rs->ti->error = "Bad value for 'raid10_copies'"; + return -EINVAL; + } + rs->print_flags |= DMPF_RAID10_COPIES; + raid10_copies = value; } else { DMERR("Unable to parse RAID parameter: %s", key); rs->ti->error = "Unable to parse RAID parameters"; @@ -538,8 +587,22 @@ static int parse_raid_params(struct raid_set *rs, char **argv, if (dm_set_target_max_io_len(rs->ti, max_io_len)) return -EINVAL; - if ((rs->raid_type->level > 1) && - sector_div(sectors_per_dev, (rs->md.raid_disks - rs->raid_type->parity_devs))) { + if (rs->raid_type->level == 10) { + if (raid10_copies > rs->md.raid_disks) { + rs->ti->error = "Not enough devices to satisfy specification"; + return -EINVAL; + } + + /* (Len * #mirrors) / #devices */ + sectors_per_dev = rs->ti->len * raid10_copies; + sector_div(sectors_per_dev, rs->md.raid_disks); + + rs->md.layout = raid10_format_to_md_layout(raid10_format, + raid10_copies); + rs->md.new_layout = rs->md.layout; + } else if ((rs->raid_type->level > 1) && + sector_div(sectors_per_dev, + (rs->md.raid_disks - rs->raid_type->parity_devs))) { rs->ti->error = "Target length not divisible by number of data devices"; return -EINVAL; } @@ -566,6 +629,9 @@ static int raid_is_congested(struct dm_target_callbacks *cb, int bits) if (rs->raid_type->level == 1) return md_raid1_congested(&rs->md, bits); + if (rs->raid_type->level == 10) + return md_raid10_congested(&rs->md, bits); + return md_raid5_congested(&rs->md, bits); } @@ -884,6 +950,9 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) case 6: redundancy = rs->raid_type->parity_devs; break; + case 10: + redundancy = 
raid10_md_layout_to_copies(mddev->layout) - 1; + break; default: ti->error = "Unknown RAID type"; return -EINVAL; @@ -1049,12 +1118,19 @@ static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv) goto bad; } + if (ti->len != rs->md.array_sectors) { + ti->error = "Array size does not match requested target length"; + ret = -EINVAL; + goto size_mismatch; + } rs->callbacks.congested_fn = raid_is_congested; dm_table_add_target_callbacks(ti->table, &rs->callbacks); mddev_suspend(&rs->md); return 0; +size_mismatch: + md_stop(&rs->md); bad: context_free(rs); @@ -1203,6 +1279,13 @@ static int raid_status(struct dm_target *ti, status_type_t type, DMEMIT(" region_size %lu", rs->md.bitmap_info.chunksize >> 9); + if (rs->print_flags & DMPF_RAID10_COPIES) + DMEMIT(" raid10_copies %u", + raid10_md_layout_to_copies(rs->md.layout)); + + if (rs->print_flags & DMPF_RAID10_FORMAT) + DMEMIT(" raid10_format near"); + DMEMIT(" %d", rs->md.raid_disks); for (i = 0; i < rs->md.raid_disks; i++) { if (rs->dev[i].meta_dev) @@ -1277,7 +1360,7 @@ static void raid_resume(struct dm_target *ti) static struct target_type raid_target = { .name = "raid", - .version = {1, 2, 0}, + .version = {1, 3, 0}, .module = THIS_MODULE, .ctr = raid_ctr, .dtr = raid_dtr, @@ -1304,6 +1387,8 @@ module_init(dm_raid_init); module_exit(dm_raid_exit); MODULE_DESCRIPTION(DM_NAME " raid4/5/6 target"); +MODULE_ALIAS("dm-raid1"); +MODULE_ALIAS("dm-raid10"); MODULE_ALIAS("dm-raid4"); MODULE_ALIAS("dm-raid5"); MODULE_ALIAS("dm-raid6"); diff --git a/drivers/md/md.c b/drivers/md/md.c index d5ab4493c8be..fcd098794d37 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) } EXPORT_SYMBOL(md_flush_request); -/* Support for plugging. - * This mirrors the plugging support in request_queue, but does not - * require having a whole queue or request structures. - * We allocate an md_plug_cb for each md device and each thread it gets - * plugged on. This links tot the private plug_handle structure in the - * personality data where we keep a count of the number of outstanding - * plugs so other code can see if a plug is active. - */ -struct md_plug_cb { - struct blk_plug_cb cb; - struct mddev *mddev; -}; - -static void plugger_unplug(struct blk_plug_cb *cb) -{ - struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); - if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) - md_wakeup_thread(mdcb->mddev->thread); - kfree(mdcb); -} - -/* Check that an unplug wakeup will come shortly. 
- * If not, wakeup the md thread immediately - */ -int mddev_check_plugged(struct mddev *mddev) +void md_unplug(struct blk_plug_cb *cb, bool from_schedule) { - struct blk_plug *plug = current->plug; - struct md_plug_cb *mdcb; - - if (!plug) - return 0; - - list_for_each_entry(mdcb, &plug->cb_list, cb.list) { - if (mdcb->cb.callback == plugger_unplug && - mdcb->mddev == mddev) { - /* Already on the list, move to top */ - if (mdcb != list_first_entry(&plug->cb_list, - struct md_plug_cb, - cb.list)) - list_move(&mdcb->cb.list, &plug->cb_list); - return 1; - } - } - /* Not currently on the callback list */ - mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); - if (!mdcb) - return 0; - - mdcb->mddev = mddev; - mdcb->cb.callback = plugger_unplug; - atomic_inc(&mddev->plug_cnt); - list_add(&mdcb->cb.list, &plug->cb_list); - return 1; + struct mddev *mddev = cb->data; + md_wakeup_thread(mddev->thread); + kfree(cb); } -EXPORT_SYMBOL_GPL(mddev_check_plugged); +EXPORT_SYMBOL(md_unplug); static inline struct mddev *mddev_get(struct mddev *mddev) { @@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) atomic_set(&mddev->active, 1); atomic_set(&mddev->openers, 0); atomic_set(&mddev->active_io, 0); - atomic_set(&mddev->plug_cnt, 0); spin_lock_init(&mddev->write_lock); atomic_set(&mddev->flush_pending, 0); init_waitqueue_head(&mddev->sb_wait); @@ -3942,17 +3893,13 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len) break; case clear: /* stopping an active array */ - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; err = do_md_stop(mddev, 0, NULL); break; case inactive: /* stopping an active array */ - if (mddev->pers) { - if (atomic_read(&mddev->openers) > 0) - return -EBUSY; + if (mddev->pers) err = do_md_stop(mddev, 2, NULL); - } else + else err = 0; /* already inactive */ break; case suspended: diff --git a/drivers/md/md.h b/drivers/md/md.h index 7b4a3c318cae..f385b038589d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -266,9 +266,6 @@ struct mddev { int new_chunk_sectors; int reshape_backwards; - atomic_t plug_cnt; /* If device is expecting - * more bios soon. - */ struct md_thread *thread; /* management thread */ struct md_thread *sync_thread; /* doing resync or reconstruct */ sector_t curr_resync; /* last block scheduled */ @@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, struct mddev *mddev); extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, struct mddev *mddev); -extern int mddev_check_plugged(struct mddev *mddev); extern void md_trim_bio(struct bio *bio, int offset, int size); + +extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); +static inline int mddev_check_plugged(struct mddev *mddev) +{ + return !!blk_check_plugged(md_unplug, mddev, + sizeof(struct blk_plug_cb)); +} #endif /* _MD_MD_H */ diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index cacd008d6864..611b5f797618 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -46,6 +46,20 @@ */ #define NR_RAID1_BIOS 256 +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. 
So we record + * the success by setting bios[n] to IO_MADE_GOOD + */ + +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + /* When there are this many requests queue to be written by * the raid1 thread, we become 'congested' to provide back-pressure * for writeback. @@ -483,12 +497,14 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect const sector_t this_sector = r1_bio->sector; int sectors; int best_good_sectors; - int start_disk; - int best_disk; - int i; + int best_disk, best_dist_disk, best_pending_disk; + int has_nonrot_disk; + int disk; sector_t best_dist; + unsigned int min_pending; struct md_rdev *rdev; int choose_first; + int choose_next_idle; rcu_read_lock(); /* @@ -499,26 +515,26 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect retry: sectors = r1_bio->sectors; best_disk = -1; + best_dist_disk = -1; best_dist = MaxSector; + best_pending_disk = -1; + min_pending = UINT_MAX; best_good_sectors = 0; + has_nonrot_disk = 0; + choose_next_idle = 0; if (conf->mddev->recovery_cp < MaxSector && - (this_sector + sectors >= conf->next_resync)) { + (this_sector + sectors >= conf->next_resync)) choose_first = 1; - start_disk = 0; - } else { + else choose_first = 0; - start_disk = conf->last_used; - } - for (i = 0 ; i < conf->raid_disks * 2 ; i++) { + for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) { sector_t dist; sector_t first_bad; int bad_sectors; - - int disk = start_disk + i; - if (disk >= conf->raid_disks * 2) - disk -= conf->raid_disks * 2; + unsigned int pending; + bool nonrot; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -577,22 +593,77 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect } else best_good_sectors = sectors; + nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev)); + has_nonrot_disk |= nonrot; + pending = atomic_read(&rdev->nr_pending); dist = abs(this_sector - conf->mirrors[disk].head_position); - if (choose_first - /* Don't change to another disk for sequential reads */ - || conf->next_seq_sect == this_sector - || dist == 0 - /* If device is idle, use it */ - || atomic_read(&rdev->nr_pending) == 0) { + if (choose_first) { + best_disk = disk; + break; + } + /* Don't change to another disk for sequential reads */ + if (conf->mirrors[disk].next_seq_sect == this_sector + || dist == 0) { + int opt_iosize = bdev_io_opt(rdev->bdev) >> 9; + struct raid1_info *mirror = &conf->mirrors[disk]; + + best_disk = disk; + /* + * If the buffered sequential IO size exceeds the optimal + * iosize, check whether there is an idle disk and, if so, + * choose it. read_balance could already have chosen an + * idle disk before noticing that this is sequential IO on + * this disk. That doesn't matter: this disk will go idle, + * and it will be utilized again once the first disk's IO + * size exceeds the optimal iosize. This way the first + * disk gets at least the optimal iosize per stream; the + * iosize of the second disk might be small, but that is + * not a big deal since when the second disk starts IO, + * the first disk is likely still busy.
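+ * + * (Hypothetical example: with opt_iosize = 256 sectors, seq_start = 0 + * and a sequential stream that has advanced next_seq_sect to 512, the + * test just below holds (512 > 256 and 512 - 256 >= 0), so + * choose_next_idle is set and an idle mirror may take over the next + * read.)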
+ */ + if (nonrot && opt_iosize > 0 && + mirror->seq_start != MaxSector && + mirror->next_seq_sect > opt_iosize && + mirror->next_seq_sect - opt_iosize >= + mirror->seq_start) { + choose_next_idle = 1; + continue; + } + break; + } + /* If device is idle, use it */ + if (pending == 0) { + best_disk = disk; + break; + } + + if (choose_next_idle) + continue; + + if (min_pending > pending) { + min_pending = pending; + best_pending_disk = disk; + } + if (dist < best_dist) { best_dist = dist; - best_disk = disk; + best_dist_disk = disk; } } + /* + * If all disks are rotational, choose the closest disk. If any disk is + * non-rotational, choose the disk with the fewest pending requests, even + * if that disk is rotational, which may or may not be optimal for arrays + * with mixed rotational/non-rotational disks depending on the workload. + */ + if (best_disk == -1) { + if (has_nonrot_disk) + best_disk = best_pending_disk; + else + best_disk = best_dist_disk; + } + if (best_disk >= 0) { rdev = rcu_dereference(conf->mirrors[best_disk].rdev); if (!rdev) @@ -606,8 +677,11 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect goto retry; } sectors = best_good_sectors; - conf->next_seq_sect = this_sector + sectors; - conf->last_used = best_disk; + + if (conf->mirrors[best_disk].next_seq_sect != this_sector) + conf->mirrors[best_disk].seq_start = this_sector; + + conf->mirrors[best_disk].next_seq_sect = this_sector + sectors; } rcu_read_unlock(); *max_sectors = sectors; @@ -870,10 +944,48 @@ do_sync_io: pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); } +struct raid1_plug_cb { + struct blk_plug_cb cb; + struct bio_list pending; + int pending_cnt; +}; + +static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule) +{ + struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb, + cb); + struct mddev *mddev = plug->cb.data; + struct r1conf *conf = mddev->private; + struct bio *bio; + + if (from_schedule) { + spin_lock_irq(&conf->device_lock); + bio_list_merge(&conf->pending_bio_list, &plug->pending); + conf->pending_count += plug->pending_cnt; + spin_unlock_irq(&conf->device_lock); + md_wakeup_thread(mddev->thread); + kfree(plug); + return; + } + + /* we aren't scheduling, so we can do the write-out directly.
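+ * (Aside: with from_schedule set we were called from the scheduler + * path, so rather than issuing IO there, the branch above hands the + * bios to the raid1d thread via pending_bio_list and + * md_wakeup_thread().)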
*/ + bio = bio_list_get(&plug->pending); + bitmap_unplug(mddev->bitmap); + wake_up(&conf->wait_barrier); + + while (bio) { /* submit pending writes */ + struct bio *next = bio->bi_next; + bio->bi_next = NULL; + generic_make_request(bio); + bio = next; + } + kfree(plug); +} + static void make_request(struct mddev *mddev, struct bio * bio) { struct r1conf *conf = mddev->private; - struct mirror_info *mirror; + struct raid1_info *mirror; struct r1bio *r1_bio; struct bio *read_bio; int i, disks; @@ -883,6 +995,8 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; + struct blk_plug_cb *cb; + struct raid1_plug_cb *plug = NULL; int first_clone; int sectors_handled; int max_sectors; @@ -1185,11 +1299,22 @@ read_again: mbio->bi_private = r1_bio; atomic_inc(&r1_bio->remaining); + + cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug)); + if (cb) + plug = container_of(cb, struct raid1_plug_cb, cb); + else + plug = NULL; spin_lock_irqsave(&conf->device_lock, flags); - bio_list_add(&conf->pending_bio_list, mbio); - conf->pending_count++; + if (plug) { + bio_list_add(&plug->pending, mbio); + plug->pending_cnt++; + } else { + bio_list_add(&conf->pending_bio_list, mbio); + conf->pending_count++; + } spin_unlock_irqrestore(&conf->device_lock, flags); - if (!mddev_check_plugged(mddev)) + if (!plug) md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, @@ -1364,7 +1489,7 @@ static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = -EEXIST; int mirror = 0; - struct mirror_info *p; + struct raid1_info *p; int first = 0; int last = conf->raid_disks - 1; struct request_queue *q = bdev_get_queue(rdev->bdev); @@ -1433,7 +1558,7 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; - struct mirror_info *p = conf->mirrors+ number; + struct raid1_info *p = conf->mirrors + number; if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; @@ -2173,8 +2298,7 @@ static void raid1d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2371,6 +2495,18 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp bio->bi_rw = READ; bio->bi_end_io = end_sync_read; read_targets++; + } else if (!test_bit(WriteErrorSeen, &rdev->flags) && + test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && + !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) { + /* + * The device is suitable for reading (InSync), + * but has bad block(s) here. Let's try to correct them, + * if we are doing resync or repair. Otherwise, leave + * this device alone for this sync request. 
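+ * (A successful over-write copies good data from another mirror over + * the bad range; per the IO_MADE_GOOD comment near the top of this + * file, the success is then recorded so the bad-block marking can be + * removed from process context.)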
+ */ + bio->bi_rw = WRITE; + bio->bi_end_io = end_sync_write; + write_targets++; } } if (bio->bi_end_io) { @@ -2428,7 +2564,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp /* There is nowhere to write, so all non-sync * drives must be failed - so we are finished */ - sector_t rv = max_sector - sector_nr; + sector_t rv; + if (min_bad > 0) + max_sector = sector_nr + min_bad; + rv = max_sector - sector_nr; *skipped = 1; put_buf(r1_bio); return rv; @@ -2521,7 +2660,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) { struct r1conf *conf; int i; - struct mirror_info *disk; + struct raid1_info *disk; struct md_rdev *rdev; int err = -ENOMEM; @@ -2529,7 +2668,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (!conf) goto abort; - conf->mirrors = kzalloc(sizeof(struct mirror_info) + conf->mirrors = kzalloc(sizeof(struct raid1_info) * mddev->raid_disks * 2, GFP_KERNEL); if (!conf->mirrors) @@ -2572,6 +2711,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) mddev->merge_check_needed = 1; disk->head_position = 0; + disk->seq_start = MaxSector; } conf->raid_disks = mddev->raid_disks; conf->mddev = mddev; @@ -2585,7 +2725,6 @@ static struct r1conf *setup_conf(struct mddev *mddev) conf->recovery_disabled = mddev->recovery_disabled - 1; err = -EIO; - conf->last_used = -1; for (i = 0; i < conf->raid_disks * 2; i++) { disk = conf->mirrors + i; @@ -2611,19 +2750,9 @@ static struct r1conf *setup_conf(struct mddev *mddev) if (disk->rdev && (disk->rdev->saved_raid_disk < 0)) conf->fullsync = 1; - } else if (conf->last_used < 0) - /* - * The first working device is used as a - * starting point to read balancing. - */ - conf->last_used = i; + } } - if (conf->last_used < 0) { - printk(KERN_ERR "md/raid1:%s: no operational mirrors\n", - mdname(mddev)); - goto abort; - } err = -ENOMEM; conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { @@ -2798,7 +2927,7 @@ static int raid1_reshape(struct mddev *mddev) */ mempool_t *newpool, *oldpool; struct pool_info *newpoolinfo; - struct mirror_info *newmirrors; + struct raid1_info *newmirrors; struct r1conf *conf = mddev->private; int cnt, raid_disks; unsigned long flags; @@ -2841,7 +2970,7 @@ static int raid1_reshape(struct mddev *mddev) kfree(newpoolinfo); return -ENOMEM; } - newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks * 2, + newmirrors = kzalloc(sizeof(struct raid1_info) * raid_disks * 2, GFP_KERNEL); if (!newmirrors) { kfree(newpoolinfo); @@ -2880,7 +3009,6 @@ static int raid1_reshape(struct mddev *mddev) conf->raid_disks = mddev->raid_disks = raid_disks; mddev->delta_disks = 0; - conf->last_used = 0; /* just make sure it is in-range */ lower_barrier(conf); set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); diff --git a/drivers/md/raid1.h b/drivers/md/raid1.h index 80ded139314c..0ff3715fb7eb 100644 --- a/drivers/md/raid1.h +++ b/drivers/md/raid1.h @@ -1,9 +1,15 @@ #ifndef _RAID1_H #define _RAID1_H -struct mirror_info { +struct raid1_info { struct md_rdev *rdev; sector_t head_position; + + /* When choosing the best device for a read (read_balance()) + * we try to keep sequential reads on the same device + */ + sector_t next_seq_sect; + sector_t seq_start; }; /* @@ -24,17 +30,11 @@ struct pool_info { struct r1conf { struct mddev *mddev; - struct mirror_info *mirrors; /* twice 'raid_disks' to + struct raid1_info *mirrors; /* twice 'raid_disks' to * allow for replacements.
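 * (Slots 0 .. raid_disks-1 hold the active mirrors and slots * raid_disks .. 2*raid_disks-1 their replacements; see * raid1_remove_disk() above, which indexes the second half as * conf->mirrors + conf->raid_disks + number.)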
*/ int raid_disks; - /* When choose the best device for a read (read_balance()) - * we try to keep sequential reads one the same device - * using 'last_used' and 'next_seq_sect' - */ - int last_used; - sector_t next_seq_sect; /* During resync, read_balancing is only allowed on the part * of the array that has been resynced. 'next_resync' tells us * where that is. @@ -135,20 +135,6 @@ struct r1bio { /* DO NOT PUT ANY NEW FIELDS HERE - bios array is contiguously alloced*/ }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio *)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting bios[n] to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r1bio.state */ #define R1BIO_Uptodate 0 #define R1BIO_IsSync 1 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8da6282254c3..de5ed6fd8806 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -60,7 +60,21 @@ */ #define NR_RAID10_BIOS 256 -/* When there are this many requests queue to be written by +/* when we get a read error on a read-only array, we redirect to another + * device without failing the first device, or trying to over-write to + * correct the read error. To keep track of bad blocks on a per-bio + * level, we store IO_BLOCKED in the appropriate 'bios' pointer + */ +#define IO_BLOCKED ((struct bio *)1) +/* When we successfully write to a known bad-block, we need to remove the + * bad-block marking which must be done from process context. So we record + * the success by setting devs[n].bio to IO_MADE_GOOD + */ +#define IO_MADE_GOOD ((struct bio *)2) + +#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) + +/* When there are this many requests queued to be written by * the raid10 thread, we become 'congested' to provide back-pressure * for writeback. 
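+ * (The threshold is the pending_count >= max_queued_requests test in + * md_raid10_congested() below.)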
*/ @@ -717,7 +731,7 @@ static struct md_rdev *read_balance(struct r10conf *conf, int sectors = r10_bio->sectors; int best_good_sectors; sector_t new_distance, best_dist; - struct md_rdev *rdev, *best_rdev; + struct md_rdev *best_rdev, *rdev = NULL; int do_balance; int best_slot; struct geom *geo = &conf->geo; @@ -839,9 +853,8 @@ retry: return rdev; } -static int raid10_congested(void *data, int bits) +int md_raid10_congested(struct mddev *mddev, int bits) { - struct mddev *mddev = data; struct r10conf *conf = mddev->private; int i, ret = 0; @@ -849,8 +862,6 @@ static int raid10_congested(void *data, int bits) conf->pending_count >= max_queued_requests) return 1; - if (mddev_congested(mddev, bits)) - return 1; rcu_read_lock(); for (i = 0; (i < conf->geo.raid_disks || i < conf->prev.raid_disks) @@ -866,6 +877,15 @@ static int raid10_congested(void *data, int bits) rcu_read_unlock(); return ret; } +EXPORT_SYMBOL_GPL(md_raid10_congested); + +static int raid10_congested(void *data, int bits) +{ + struct mddev *mddev = data; + + return mddev_congested(mddev, bits) || + md_raid10_congested(mddev, bits); +} static void flush_pending_writes(struct r10conf *conf) { @@ -1546,7 +1566,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev) static void print_conf(struct r10conf *conf) { int i; - struct mirror_info *tmp; + struct raid10_info *tmp; printk(KERN_DEBUG "RAID10 conf printout:\n"); if (!conf) { @@ -1580,7 +1600,7 @@ static int raid10_spare_active(struct mddev *mddev) { int i; struct r10conf *conf = mddev->private; - struct mirror_info *tmp; + struct raid10_info *tmp; int count = 0; unsigned long flags; @@ -1655,7 +1675,7 @@ static int raid10_add_disk(struct mddev *mddev, struct md_rdev *rdev) else mirror = first; for ( ; mirror <= last ; mirror++) { - struct mirror_info *p = &conf->mirrors[mirror]; + struct raid10_info *p = &conf->mirrors[mirror]; if (p->recovery_disabled == mddev->recovery_disabled) continue; if (p->rdev) { @@ -1709,7 +1729,7 @@ static int raid10_remove_disk(struct mddev *mddev, struct md_rdev *rdev) int err = 0; int number = rdev->raid_disk; struct md_rdev **rdevp; - struct mirror_info *p = conf->mirrors + number; + struct raid10_info *p = conf->mirrors + number; print_conf(conf); if (rdev == p->rdev) @@ -2660,8 +2680,7 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - if (atomic_read(&mddev->plug_cnt) == 0) - flush_pending_writes(conf); + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2876,7 +2895,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, sector_t sect; int must_sync; int any_working; - struct mirror_info *mirror = &conf->mirrors[i]; + struct raid10_info *mirror = &conf->mirrors[i]; if ((mirror->rdev == NULL || test_bit(In_sync, &mirror->rdev->flags)) @@ -3388,7 +3407,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) goto out; /* FIXME calc properly */ - conf->mirrors = kzalloc(sizeof(struct mirror_info)*(mddev->raid_disks + + conf->mirrors = kzalloc(sizeof(struct raid10_info)*(mddev->raid_disks + max(0,mddev->delta_disks)), GFP_KERNEL); if (!conf->mirrors) @@ -3452,7 +3471,7 @@ static int run(struct mddev *mddev) { struct r10conf *conf; int i, disk_idx, chunk_size; - struct mirror_info *disk; + struct raid10_info *disk; struct md_rdev *rdev; sector_t size; sector_t min_offset_diff = 0; @@ -3472,12 +3491,14 @@ static int run(struct mddev *mddev) conf->thread = NULL; chunk_size = mddev->chunk_sectors << 9; - blk_queue_io_min(mddev->queue, 
chunk_size); - if (conf->geo.raid_disks % conf->geo.near_copies) - blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); - else - blk_queue_io_opt(mddev->queue, chunk_size * - (conf->geo.raid_disks / conf->geo.near_copies)); + if (mddev->queue) { + blk_queue_io_min(mddev->queue, chunk_size); + if (conf->geo.raid_disks % conf->geo.near_copies) + blk_queue_io_opt(mddev->queue, chunk_size * conf->geo.raid_disks); + else + blk_queue_io_opt(mddev->queue, chunk_size * + (conf->geo.raid_disks / conf->geo.near_copies)); + } rdev_for_each(rdev, mddev) { long long diff; @@ -3511,8 +3532,9 @@ static int run(struct mddev *mddev) if (first || diff < min_offset_diff) min_offset_diff = diff; - disk_stack_limits(mddev->gendisk, rdev->bdev, - rdev->data_offset << 9); + if (mddev->gendisk) + disk_stack_limits(mddev->gendisk, rdev->bdev, + rdev->data_offset << 9); disk->head_position = 0; } @@ -3575,22 +3597,22 @@ static int run(struct mddev *mddev) md_set_array_sectors(mddev, size); mddev->resync_max_sectors = size; - mddev->queue->backing_dev_info.congested_fn = raid10_congested; - mddev->queue->backing_dev_info.congested_data = mddev; - - /* Calculate max read-ahead size. - * We need to readahead at least twice a whole stripe.... - * maybe... - */ - { + if (mddev->queue) { int stripe = conf->geo.raid_disks * ((mddev->chunk_sectors << 9) / PAGE_SIZE); + mddev->queue->backing_dev_info.congested_fn = raid10_congested; + mddev->queue->backing_dev_info.congested_data = mddev; + + /* Calculate max read-ahead size. + * We need to readahead at least twice a whole stripe.... + * maybe... + */ stripe /= conf->geo.near_copies; if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) mddev->queue->backing_dev_info.ra_pages = 2 * stripe; + blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); } - blk_queue_merge_bvec(mddev->queue, raid10_mergeable_bvec); if (md_integrity_register(mddev)) goto out_free_conf; @@ -3641,7 +3663,10 @@ static int stop(struct mddev *mddev) lower_barrier(conf); md_unregister_thread(&mddev->thread); - blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ + if (mddev->queue) + /* the unplug fn references 'conf'*/ + blk_sync_queue(mddev->queue); + if (conf->r10bio_pool) mempool_destroy(conf->r10bio_pool); kfree(conf->mirrors); @@ -3805,7 +3830,7 @@ static int raid10_check_reshape(struct mddev *mddev) if (mddev->delta_disks > 0) { /* allocate new 'mirrors' list */ conf->mirrors_new = kzalloc( - sizeof(struct mirror_info) + sizeof(struct raid10_info) *(mddev->raid_disks + mddev->delta_disks), GFP_KERNEL); @@ -3930,7 +3955,7 @@ static int raid10_start_reshape(struct mddev *mddev) spin_lock_irq(&conf->device_lock); if (conf->mirrors_new) { memcpy(conf->mirrors_new, conf->mirrors, - sizeof(struct mirror_info)*conf->prev.raid_disks); + sizeof(struct raid10_info)*conf->prev.raid_disks); smp_mb(); kfree(conf->mirrors_old); /* FIXME and elsewhere */ conf->mirrors_old = conf->mirrors; diff --git a/drivers/md/raid10.h b/drivers/md/raid10.h index 135b1b0a1554..007c2c68dd83 100644 --- a/drivers/md/raid10.h +++ b/drivers/md/raid10.h @@ -1,7 +1,7 @@ #ifndef _RAID10_H #define _RAID10_H -struct mirror_info { +struct raid10_info { struct md_rdev *rdev, *replacement; sector_t head_position; int recovery_disabled; /* matches @@ -13,8 +13,8 @@ struct mirror_info { struct r10conf { struct mddev *mddev; - struct mirror_info *mirrors; - struct mirror_info *mirrors_new, *mirrors_old; + struct raid10_info *mirrors; + struct raid10_info *mirrors_new, *mirrors_old; spinlock_t device_lock; /* 
geometry */ @@ -123,20 +123,6 @@ struct r10bio { } devs[0]; }; -/* when we get a read error on a read-only array, we redirect to another - * device without failing the first device, or trying to over-write to - * correct the read error. To keep track of bad blocks on a per-bio - * level, we store IO_BLOCKED in the appropriate 'bios' pointer - */ -#define IO_BLOCKED ((struct bio*)1) -/* When we successfully write to a known bad-block, we need to remove the - * bad-block marking which must be done from process context. So we record - * the success by setting devs[n].bio to IO_MADE_GOOD - */ -#define IO_MADE_GOOD ((struct bio *)2) - -#define BIO_SPECIAL(bio) ((unsigned long)bio <= 2) - /* bits for r10bio.state */ enum r10bio_state { R10BIO_Uptodate, @@ -159,4 +145,7 @@ enum r10bio_state { */ R10BIO_Previous, }; + +extern int md_raid10_congested(struct mddev *mddev, int bits); + #endif diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 04348d76bb30..adda94df5eb2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -99,34 +99,40 @@ static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) * We maintain a biased count of active stripes in the bottom 16 bits of * bi_phys_segments, and a count of processed stripes in the upper 16 bits */ -static inline int raid5_bi_phys_segments(struct bio *bio) +static inline int raid5_bi_processed_stripes(struct bio *bio) { - return bio->bi_phys_segments & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return (atomic_read(segments) >> 16) & 0xffff; } -static inline int raid5_bi_hw_segments(struct bio *bio) +static inline int raid5_dec_bi_active_stripes(struct bio *bio) { - return (bio->bi_phys_segments >> 16) & 0xffff; + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + return atomic_sub_return(1, segments) & 0xffff; } -static inline int raid5_dec_bi_phys_segments(struct bio *bio) +static inline void raid5_inc_bi_active_stripes(struct bio *bio) { - --bio->bi_phys_segments; - return raid5_bi_phys_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_inc(segments); } -static inline int raid5_dec_bi_hw_segments(struct bio *bio) +static inline void raid5_set_bi_processed_stripes(struct bio *bio, + unsigned int cnt) { - unsigned short val = raid5_bi_hw_segments(bio); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + int old, new; - --val; - bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); - return val; + do { + old = atomic_read(segments); + new = (old & 0xffff) | (cnt << 16); + } while (atomic_cmpxchg(segments, old, new) != old); } -static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) +static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt) { - bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); + atomic_t *segments = (atomic_t *)&bio->bi_phys_segments; + atomic_set(segments, cnt); } /* Find first data disk in a raid6 stripe */ @@ -190,49 +196,56 @@ static int stripe_operations_active(struct stripe_head *sh) test_bit(STRIPE_COMPUTE_RUN, &sh->state); } -static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh) { - if (atomic_dec_and_test(&sh->count)) { - BUG_ON(!list_empty(&sh->lru)); - BUG_ON(atomic_read(&conf->active_stripes)==0); - if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state) && - !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - list_add_tail(&sh->lru, 
&conf->delayed_list); - else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && - sh->bm_seq - conf->seq_write > 0) - list_add_tail(&sh->lru, &conf->bitmap_list); - else { - clear_bit(STRIPE_DELAYED, &sh->state); - clear_bit(STRIPE_BIT_DELAY, &sh->state); - list_add_tail(&sh->lru, &conf->handle_list); - } - md_wakeup_thread(conf->mddev->thread); - } else { - BUG_ON(stripe_operations_active(sh)); - if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) - if (atomic_dec_return(&conf->preread_active_stripes) - < IO_THRESHOLD) - md_wakeup_thread(conf->mddev->thread); - atomic_dec(&conf->active_stripes); - if (!test_bit(STRIPE_EXPANDING, &sh->state)) { - list_add_tail(&sh->lru, &conf->inactive_list); - wake_up(&conf->wait_for_stripe); - if (conf->retry_read_aligned) - md_wakeup_thread(conf->mddev->thread); - } + BUG_ON(!list_empty(&sh->lru)); + BUG_ON(atomic_read(&conf->active_stripes)==0); + if (test_bit(STRIPE_HANDLE, &sh->state)) { + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + list_add_tail(&sh->lru, &conf->delayed_list); + else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && + sh->bm_seq - conf->seq_write > 0) + list_add_tail(&sh->lru, &conf->bitmap_list); + else { + clear_bit(STRIPE_DELAYED, &sh->state); + clear_bit(STRIPE_BIT_DELAY, &sh->state); + list_add_tail(&sh->lru, &conf->handle_list); + } + md_wakeup_thread(conf->mddev->thread); + } else { + BUG_ON(stripe_operations_active(sh)); + if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) + if (atomic_dec_return(&conf->preread_active_stripes) + < IO_THRESHOLD) + md_wakeup_thread(conf->mddev->thread); + atomic_dec(&conf->active_stripes); + if (!test_bit(STRIPE_EXPANDING, &sh->state)) { + list_add_tail(&sh->lru, &conf->inactive_list); + wake_up(&conf->wait_for_stripe); + if (conf->retry_read_aligned) + md_wakeup_thread(conf->mddev->thread); } } } +static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) +{ + if (atomic_dec_and_test(&sh->count)) + do_release_stripe(conf, sh); +} + static void release_stripe(struct stripe_head *sh) { struct r5conf *conf = sh->raid_conf; unsigned long flags; - spin_lock_irqsave(&conf->device_lock, flags); - __release_stripe(conf, sh); - spin_unlock_irqrestore(&conf->device_lock, flags); + local_irq_save(flags); + if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { + do_release_stripe(conf, sh); + spin_unlock(&conf->device_lock); + } + local_irq_restore(flags); } static inline void remove_hash(struct stripe_head *sh) @@ -471,7 +484,8 @@ get_active_stripe(struct r5conf *conf, sector_t sector, } else { if (atomic_read(&sh->count)) { BUG_ON(!list_empty(&sh->lru) - && !test_bit(STRIPE_EXPANDING, &sh->state)); + && !test_bit(STRIPE_EXPANDING, &sh->state) + && !test_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)); } else { if (!test_bit(STRIPE_HANDLE, &sh->state)) atomic_inc(&conf->active_stripes); @@ -640,6 +654,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) else bi->bi_sector = (sh->sector + rdev->data_offset); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + bi->bi_rw |= REQ_FLUSH; + bi->bi_flags = 1 << BIO_UPTODATE; bi->bi_idx = 0; bi->bi_io_vec[0].bv_len = STRIPE_SIZE; @@ -749,14 +766,12 @@ static void ops_complete_biofill(void *stripe_head_ref) { struct stripe_head *sh = stripe_head_ref; struct bio *return_bi = NULL; - struct r5conf *conf = sh->raid_conf; int i; pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); /* clear completed biofills */ - spin_lock_irq(&conf->device_lock); for 
(i = sh->disks; i--; ) { struct r5dev *dev = &sh->dev[i]; @@ -774,7 +789,7 @@ static void ops_complete_biofill(void *stripe_head_ref) while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { rbi2 = r5_next_bio(rbi, dev->sector); - if (!raid5_dec_bi_phys_segments(rbi)) { + if (!raid5_dec_bi_active_stripes(rbi)) { rbi->bi_next = return_bi; return_bi = rbi; } @@ -782,7 +797,6 @@ static void ops_complete_biofill(void *stripe_head_ref) } } } - spin_unlock_irq(&conf->device_lock); clear_bit(STRIPE_BIOFILL_RUN, &sh->state); return_io(return_bi); @@ -794,7 +808,6 @@ static void ops_complete_biofill(void *stripe_head_ref) static void ops_run_biofill(struct stripe_head *sh) { struct dma_async_tx_descriptor *tx = NULL; - struct r5conf *conf = sh->raid_conf; struct async_submit_ctl submit; int i; @@ -805,10 +818,10 @@ static void ops_run_biofill(struct stripe_head *sh) struct r5dev *dev = &sh->dev[i]; if (test_bit(R5_Wantfill, &dev->flags)) { struct bio *rbi; - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); dev->read = rbi = dev->toread; dev->toread = NULL; - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { tx = async_copy_data(0, rbi, dev->page, @@ -1144,12 +1157,12 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { struct bio *wbi; - spin_lock_irq(&sh->raid_conf->device_lock); + spin_lock_irq(&sh->stripe_lock); chosen = dev->towrite; dev->towrite = NULL; BUG_ON(dev->written); wbi = dev->written = chosen; - spin_unlock_irq(&sh->raid_conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { @@ -1454,6 +1467,8 @@ static int grow_one_stripe(struct r5conf *conf) init_waitqueue_head(&sh->ops.wait_for_ops); #endif + spin_lock_init(&sh->stripe_lock); + if (grow_buffers(sh)) { shrink_buffers(sh); kmem_cache_free(conf->slab_cache, sh); @@ -1739,7 +1754,9 @@ static void raid5_end_read_request(struct bio * bi, int error) atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - } + } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + if (atomic_read(&rdev->read_errors)) atomic_set(&rdev->read_errors, 0); } else { @@ -1784,7 +1801,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else retry = 1; if (retry) - set_bit(R5_ReadError, &sh->dev[i].flags); + if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { + set_bit(R5_ReadError, &sh->dev[i].flags); + clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); + } else + set_bit(R5_ReadNoMerge, &sh->dev[i].flags); else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); @@ -2340,11 +2361,18 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in (unsigned long long)bi->bi_sector, (unsigned long long)sh->sector); - - spin_lock_irq(&conf->device_lock); + /* + * If several bios share a stripe, the bio's bi_phys_segments field + * acts as a reference count to avoid races. The reference count should + * already be increased before this function is called (for example, in + * make_request()), so other bios sharing this stripe will not free the + * stripe. If a stripe is used by only one bio, the stripe lock will + * protect it.
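+ * + * (Hypothetical example: the low 16 bits count the stripes still + * referencing the bio and the high 16 bits count processed stripes, + * per the helpers at the top of this file; a value of 0x00030002 + * means three stripes processed and two references outstanding, and + * the bio completes once raid5_dec_bi_active_stripes() brings the + * low half to zero.)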
+ */ + spin_lock_irq(&sh->stripe_lock); if (forwrite) { bip = &sh->dev[dd_idx].towrite; - if (*bip == NULL && sh->dev[dd_idx].written == NULL) + if (*bip == NULL) firstwrite = 1; } else bip = &sh->dev[dd_idx].toread; @@ -2360,7 +2388,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (*bip) bi->bi_next = *bip; *bip = bi; - bi->bi_phys_segments++; + raid5_inc_bi_active_stripes(bi); if (forwrite) { /* check if page is covered */ @@ -2375,7 +2403,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); } - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", (unsigned long long)(*bip)->bi_sector, @@ -2391,7 +2419,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in overlap: set_bit(R5_Overlap, &sh->dev[dd_idx].flags); - spin_unlock_irq(&conf->device_lock); + spin_unlock_irq(&sh->stripe_lock); return 0; } @@ -2441,10 +2469,11 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, rdev_dec_pending(rdev, conf->mddev); } } - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&sh->stripe_lock); /* fail all writes first */ bi = sh->dev[i].towrite; sh->dev[i].towrite = NULL; + spin_unlock_irq(&sh->stripe_lock); if (bi) { s->to_write--; bitmap_end = 1; @@ -2457,13 +2486,17 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } + if (bitmap_end) + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, 0, 0); + bitmap_end = 0; /* and fail all 'written' */ bi = sh->dev[i].written; sh->dev[i].written = NULL; @@ -2472,7 +2505,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, sh->dev[i].sector + STRIPE_SECTORS) { struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { md_write_end(conf->mddev); bi->bi_next = *return_bi; *return_bi = bi; @@ -2496,14 +2529,13 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); clear_bit(BIO_UPTODATE, &bi->bi_flags); - if (!raid5_dec_bi_phys_segments(bi)) { + if (!raid5_dec_bi_active_stripes(bi)) { bi->bi_next = *return_bi; *return_bi = bi; } bi = nextbi; } } - spin_unlock_irq(&conf->device_lock); if (bitmap_end) bitmap_endwrite(conf->mddev->bitmap, sh->sector, STRIPE_SECTORS, 0, 0); @@ -2707,30 +2739,23 @@ static void handle_stripe_clean_event(struct r5conf *conf, test_bit(R5_UPTODATE, &dev->flags)) { /* We can return any write requests */ struct bio *wbi, *wbi2; - int bitmap_end = 0; pr_debug("Return write for disc %d\n", i); - spin_lock_irq(&conf->device_lock); wbi = dev->written; dev->written = NULL; while (wbi && wbi->bi_sector < dev->sector + STRIPE_SECTORS) { wbi2 = r5_next_bio(wbi, dev->sector); - if (!raid5_dec_bi_phys_segments(wbi)) { + if (!raid5_dec_bi_active_stripes(wbi)) { md_write_end(conf->mddev); wbi->bi_next = *return_bi; *return_bi = wbi; } wbi = wbi2; } - if (dev->towrite == NULL) - bitmap_end = 1; - 
spin_unlock_irq(&conf->device_lock); - if (bitmap_end) - bitmap_endwrite(conf->mddev->bitmap, - sh->sector, - STRIPE_SECTORS, + bitmap_endwrite(conf->mddev->bitmap, sh->sector, + STRIPE_SECTORS, !test_bit(STRIPE_DEGRADED, &sh->state), - 0); + 0); } } @@ -3182,7 +3207,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) /* Now to look around and see what can be done */ rcu_read_lock(); - spin_lock_irq(&conf->device_lock); for (i=disks; i--; ) { struct md_rdev *rdev; sector_t first_bad; @@ -3328,7 +3352,6 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) do_recovery = 1; } } - spin_unlock_irq(&conf->device_lock); if (test_bit(STRIPE_SYNCING, &sh->state)) { /* If there is a failed device being replaced, * we must be recovering. @@ -3791,7 +3814,7 @@ static struct bio *remove_bio_from_retry(struct r5conf *conf) * this sets the active strip count to 1 and the processed * strip count to zero (upper 8 bits) */ - bi->bi_phys_segments = 1; /* biased count of active stripes */ + raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ } return bi; @@ -3988,6 +4011,62 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) return sh; } +struct raid5_plug_cb { + struct blk_plug_cb cb; + struct list_head list; +}; + +static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) +{ + struct raid5_plug_cb *cb = container_of( + blk_cb, struct raid5_plug_cb, cb); + struct stripe_head *sh; + struct mddev *mddev = cb->cb.data; + struct r5conf *conf = mddev->private; + + if (cb->list.next && !list_empty(&cb->list)) { + spin_lock_irq(&conf->device_lock); + while (!list_empty(&cb->list)) { + sh = list_first_entry(&cb->list, struct stripe_head, lru); + list_del_init(&sh->lru); + /* + * avoid race release_stripe_plug() sees + * STRIPE_ON_UNPLUG_LIST clear but the stripe + * is still in our list + */ + smp_mb__before_clear_bit(); + clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); + __release_stripe(conf, sh); + } + spin_unlock_irq(&conf->device_lock); + } + kfree(cb); +} + +static void release_stripe_plug(struct mddev *mddev, + struct stripe_head *sh) +{ + struct blk_plug_cb *blk_cb = blk_check_plugged( + raid5_unplug, mddev, + sizeof(struct raid5_plug_cb)); + struct raid5_plug_cb *cb; + + if (!blk_cb) { + release_stripe(sh); + return; + } + + cb = container_of(blk_cb, struct raid5_plug_cb, cb); + + if (cb->list.next == NULL) + INIT_LIST_HEAD(&cb->list); + + if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) + list_add_tail(&sh->lru, &cb->list); + else + release_stripe(sh); +} + static void make_request(struct mddev *mddev, struct bio * bi) { struct r5conf *conf = mddev->private; @@ -4113,11 +4192,10 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); set_bit(STRIPE_HANDLE, &sh->state); clear_bit(STRIPE_DELAYED, &sh->state); - if ((bi->bi_rw & REQ_SYNC) && + if ((bi->bi_rw & REQ_NOIDLE) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); - mddev_check_plugged(mddev); - release_stripe(sh); + release_stripe_plug(mddev, sh); } else { /* cannot get stripe for read-ahead, just give-up */ clear_bit(BIO_UPTODATE, &bi->bi_flags); @@ -4126,9 +4204,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) } } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(bi); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(bi); if (remaining == 0) { if ( rw == 
WRITE ) @@ -4484,7 +4560,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) sector += STRIPE_SECTORS, scnt++) { - if (scnt < raid5_bi_hw_segments(raid_bio)) + if (scnt < raid5_bi_processed_stripes(raid_bio)) /* already done this stripe */ continue; @@ -4492,25 +4568,24 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) if (!sh) { /* failed to get a stripe - must wait */ - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { release_stripe(sh); - raid5_set_bi_hw_segments(raid_bio, scnt); + raid5_set_bi_processed_stripes(raid_bio, scnt); conf->retry_read_aligned = raid_bio; return handled; } + set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); handle_stripe(sh); release_stripe(sh); handled++; } - spin_lock_irq(&conf->device_lock); - remaining = raid5_dec_bi_phys_segments(raid_bio); - spin_unlock_irq(&conf->device_lock); + remaining = raid5_dec_bi_active_stripes(raid_bio); if (remaining == 0) bio_endio(raid_bio, 0); if (atomic_dec_and_test(&conf->active_aligned_reads)) @@ -4518,6 +4593,30 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) return handled; } +#define MAX_STRIPE_BATCH 8 +static int handle_active_stripes(struct r5conf *conf) +{ + struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; + int i, batch_size = 0; + + while (batch_size < MAX_STRIPE_BATCH && + (sh = __get_priority_stripe(conf)) != NULL) + batch[batch_size++] = sh; + + if (batch_size == 0) + return batch_size; + spin_unlock_irq(&conf->device_lock); + + for (i = 0; i < batch_size; i++) + handle_stripe(batch[i]); + + cond_resched(); + + spin_lock_irq(&conf->device_lock); + for (i = 0; i < batch_size; i++) + __release_stripe(conf, batch[i]); + return batch_size; +} /* * This is our raid5 kernel thread. 
@@ -4528,7 +4627,6 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) */ static void raid5d(struct mddev *mddev) { - struct stripe_head *sh; struct r5conf *conf = mddev->private; int handled; struct blk_plug plug; @@ -4542,8 +4640,9 @@ static void raid5d(struct mddev *mddev) spin_lock_irq(&conf->device_lock); while (1) { struct bio *bio; + int batch_size; - if (atomic_read(&mddev->plug_cnt) == 0 && + if ( !list_empty(&conf->bitmap_list)) { /* Now is a good time to flush some bitmap updates */ conf->seq_flush++; @@ -4553,8 +4652,7 @@ static void raid5d(struct mddev *mddev) conf->seq_write = conf->seq_flush; activate_bit_delay(conf); } - if (atomic_read(&mddev->plug_cnt) == 0) - raid5_activate_delayed(conf); + raid5_activate_delayed(conf); while ((bio = remove_bio_from_retry(conf))) { int ok; @@ -4566,21 +4664,16 @@ static void raid5d(struct mddev *mddev) handled++; } - sh = __get_priority_stripe(conf); - - if (!sh) + batch_size = handle_active_stripes(conf); + if (!batch_size) break; - spin_unlock_irq(&conf->device_lock); - - handled++; - handle_stripe(sh); - release_stripe(sh); - cond_resched(); + handled += batch_size; - if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) + if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { + spin_unlock_irq(&conf->device_lock); md_check_recovery(mddev); - - spin_lock_irq(&conf->device_lock); + spin_lock_irq(&conf->device_lock); + } } pr_debug("%d stripes handled\n", handled); diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h index 2164021f3b5f..a9fc24901eda 100644 --- a/drivers/md/raid5.h +++ b/drivers/md/raid5.h @@ -210,6 +210,7 @@ struct stripe_head { int disks; /* disks in stripe */ enum check_states check_state; enum reconstruct_states reconstruct_state; + spinlock_t stripe_lock; /** * struct stripe_operations * @target - STRIPE_OP_COMPUTE_BLK target @@ -273,6 +274,7 @@ enum r5dev_flags { R5_Wantwrite, R5_Overlap, /* There is a pending overlapping request * on this block */ + R5_ReadNoMerge, /* prevent bio from merging in block-layer */ R5_ReadError, /* seen a read error here recently */ R5_ReWrite, /* have tried to over-write the readerror */ @@ -319,6 +321,7 @@ enum { STRIPE_BIOFILL_RUN, STRIPE_COMPUTE_RUN, STRIPE_OPS_REQ_PENDING, + STRIPE_ON_UNPLUG_LIST, }; /* diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 3e8dcf8d2e05..50e08f03aa65 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -17,10 +17,12 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> #include <linux/clk.h> @@ -128,6 +130,10 @@ struct mmc_omap_host { unsigned char id; /* 16xx chips have 2 MMC blocks */ struct clk * iclk; struct clk * fclk; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; struct resource *mem_res; void __iomem *virt_base; unsigned int phys_base; @@ -153,12 +159,8 @@ struct mmc_omap_host { unsigned use_dma:1; unsigned brs_received:1, dma_done:1; - unsigned dma_is_read:1; unsigned dma_in_use:1; - int dma_ch; spinlock_t dma_lock; - struct timer_list dma_timer; - unsigned dma_len; struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; struct mmc_omap_slot *current_slot; @@ -406,18 +408,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, int abort) { enum dma_data_direction dma_data_dir; + 
struct device *dev = mmc_dev(host->mmc); + struct dma_chan *c; - BUG_ON(host->dma_ch < 0); - if (data->error) - omap_stop_dma(host->dma_ch); - /* Release DMA channel lazily */ - mod_timer(&host->dma_timer, jiffies + HZ); - if (data->flags & MMC_DATA_WRITE) + if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; - else + c = host->dma_tx; + } else { dma_data_dir = DMA_FROM_DEVICE; - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, - dma_data_dir); + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... */ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); } static void mmc_omap_send_stop_work(struct work_struct *work) @@ -525,16 +534,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) } static void -mmc_omap_dma_timer(unsigned long data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - - BUG_ON(host->dma_ch < 0); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; -} - -static void mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) { unsigned long flags; @@ -891,159 +890,15 @@ static void mmc_omap_cover_handler(unsigned long param) jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) +static void mmc_omap_dma_callback(void *priv) { - int dma_ch = host->dma_ch; - unsigned long data_addr; - u16 buf, frame; - u32 count; - struct scatterlist *sg = &data->sg[host->sg_idx]; - int src_port = 0; - int dst_port = 0; - int sync_dev = 0; - - data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); - frame = data->blksz; - count = sg_dma_len(sg); - - if ((data->blocks == 1) && (count > data->blksz)) - count = frame; - - host->dma_len = count; - - /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. - * Use 16 or 32 word frames when the blocksize is at least that large. - * Blocksize is usually 512 bytes; but not for some SD reads. 
- */ - if (cpu_is_omap15xx() && frame > 32) - frame = 32; - else if (frame > 64) - frame = 64; - count /= frame; - frame >>= 1; - - if (!(data->flags & MMC_DATA_WRITE)) { - buf = 0x800f | ((frame - 1) << 8); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_TIPB; - dst_port = OMAP_DMA_PORT_EMIFF; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_RX; - - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_dest_data_pack(dma_ch, 1); - omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } else { - buf = 0x0f80 | ((frame - 1) << 0); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_EMIFF; - dst_port = OMAP_DMA_PORT_TIPB; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_TX; - - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_src_data_pack(dma_ch, 1); - omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; - /* Max limit for DMA frame count is 0xffff */ - BUG_ON(count > 0xffff); + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; - OMAP_MMC_WRITE(host, BUF, buf); - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, - frame, count, OMAP_DMA_SYNC_FRAME, - sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - struct mmc_data *mmcdat = host->data; - - if (unlikely(host->dma_ch < 0)) { - dev_err(mmc_dev(host->mmc), - "DMA callback while DMA not enabled\n"); - return; - } - /* FIXME: We really should do something to _handle_ the errors */ - if (ch_status & OMAP1_DMA_TOUT_IRQ) { - dev_err(mmc_dev(host->mmc),"DMA timeout\n"); - return; - } - if (ch_status & OMAP_DMA_DROP_IRQ) { - dev_err(mmc_dev(host->mmc), "DMA sync error\n"); - return; - } - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - return; - } - mmcdat->bytes_xfered += host->dma_len; - host->sg_idx++; - if (host->sg_idx < host->sg_len) { - mmc_omap_prepare_dma(host, host->data); - omap_start_dma(host->dma_ch); - } else - mmc_omap_dma_done(host, host->data); -} - -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) -{ - const char *dma_dev_name; - int sync_dev, dma_ch, is_read, r; - - is_read = !(data->flags & MMC_DATA_WRITE); - del_timer_sync(&host->dma_timer); - if (host->dma_ch >= 0) { - if (is_read == host->dma_is_read) - return 0; - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - } - - if (is_read) { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_RX; - dma_dev_name = "MMC1 read"; - } else { - sync_dev = OMAP_DMA_MMC2_RX; - dma_dev_name = "MMC2 read"; - } - } else { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_TX; - dma_dev_name = "MMC1 write"; - } else { - sync_dev = OMAP_DMA_MMC2_TX; - dma_dev_name = "MMC2 write"; - } - } - r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, - host, &dma_ch); - if (r != 0) { - dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); - return r; - } - host->dma_ch = dma_ch; - host->dma_is_read = is_read; - - return 0; + mmc_omap_dma_done(host, data); } static inline void set_cmd_timeout(struct 
mmc_omap_host *host, struct mmc_request *req) @@ -1118,33 +973,85 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) host->sg_idx = 0; if (use_dma) { - if (mmc_omap_get_dma_channel(host, data) == 0) { - enum dma_data_direction dma_data_dir; - - if (data->flags & MMC_DATA_WRITE) - dma_data_dir = DMA_TO_DEVICE; - else - dma_data_dir = DMA_FROM_DEVICE; - - host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - sg_len, dma_data_dir); - host->total_bytes_left = 0; - mmc_omap_prepare_dma(host, req->data); - host->brs_received = 0; - host->dma_done = 0; - host->dma_in_use = 1; - } else - use_dma = 0; + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = cpu_is_omap15xx() ? 32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg; + + cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_maxburst = burst; + cfg.dst_maxburst = burst; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; } + use_pio: /* Revert to PIO? */ - if (!use_dma) { - OMAP_MMC_WRITE(host, BUF, 0x1f1f); - host->total_bytes_left = data->blocks * block_size; - host->sg_len = sg_len; - mmc_omap_sg_to_buf(host); - host->dma_in_use = 0; - } + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; } static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1157,8 +1064,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, /* only touch fifo AFTER the controller readies it */ mmc_omap_prepare_data(host, req); mmc_omap_start_command(host, req->cmd); - if (host->dma_in_use) - omap_start_dma(host->dma_ch); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? 
+ host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } } static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1400,6 +1311,8 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_omap_host *host = NULL; struct resource *res; + dma_cap_mask_t mask; + unsigned sig; int i, ret = 0; int irq; @@ -1439,7 +1352,6 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); spin_lock_init(&host->dma_lock); - setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); spin_lock_init(&host->slot_lock); init_waitqueue_head(&host->slot_wq); @@ -1450,11 +1362,7 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) host->id = pdev->id; host->mem_res = res; host->irq = irq; - host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; - host->dma_ch = -1; - host->irq = irq; host->phys_base = host->mem_res->start; host->virt_base = ioremap(res->start, resource_size(res)); @@ -1474,9 +1382,48 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev) goto err_free_iclk; } + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_TX : OMAP24XX_DMA_MMC2_TX; + else + sig = host->id == 0 ? OMAP_DMA_MMC_TX : OMAP_DMA_MMC2_TX; + host->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_tx) { + dev_err(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_tx) + dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); +#endif + if (cpu_is_omap24xx()) + sig = host->id == 0 ? OMAP24XX_DMA_MMC1_RX : OMAP24XX_DMA_MMC2_RX; + else + sig = host->id == 0 ? 
OMAP_DMA_MMC_RX : OMAP_DMA_MMC2_RX; + host->dma_rx = dma_request_channel(mask, omap_dma_filter_fn, &sig); +#if 0 + if (!host->dma_rx) { + dev_err(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); + goto err_dma; + } +#else + if (!host->dma_rx) + dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); +#endif + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); if (ret) - goto err_free_fclk; + goto err_free_dma; if (pdata->init != NULL) { ret = pdata->init(&pdev->dev); @@ -1510,7 +1457,11 @@ err_plat_cleanup: pdata->cleanup(&pdev->dev); err_free_irq: free_irq(host->irq, host); -err_free_fclk: +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_put(host->fclk); err_free_iclk: clk_disable(host->iclk); @@ -1545,6 +1496,11 @@ static int __devexit mmc_omap_remove(struct platform_device *pdev) clk_disable(host->iclk); clk_put(host->iclk); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); + iounmap(host->virt_base); release_mem_region(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index bc28627af66b..3a09f93cc3b6 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -19,6 +19,7 @@ #include <linux/init.h> #include <linux/kernel.h> #include <linux/debugfs.h> +#include <linux/dmaengine.h> #include <linux/seq_file.h> #include <linux/interrupt.h> #include <linux/delay.h> @@ -29,6 +30,7 @@ #include <linux/of.h> #include <linux/of_gpio.h> #include <linux/of_device.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> #include <linux/mmc/mmc.h> @@ -37,7 +39,6 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> -#include <plat/dma.h> #include <mach/hardware.h> #include <plat/board.h> #include <plat/mmc.h> @@ -166,7 +167,8 @@ struct omap_hsmmc_host { int suspended; int irq; int use_dma, dma_ch; - int dma_line_tx, dma_line_rx; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; int slot_id; int response_busy; int context_loss; @@ -797,6 +799,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} + static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) { int dma_ch; @@ -889,10 +897,13 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) spin_unlock_irqrestore(&host->irq_lock, flags); if (host->use_dma && dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, - host->data->sg_len, + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(dma_ch); + host->data->host_cookie = 0; } host->data = NULL; @@ -1190,90 +1201,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) return IRQ_HANDLED; } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, - struct mmc_data *data) -{ - int sync_dev; - - if (data->flags & MMC_DATA_WRITE) - sync_dev = host->dma_line_tx; - else - sync_dev = host->dma_line_rx; - return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, - struct mmc_data *data, - struct scatterlist *sgl) -{ - int blksz, nblk, dma_ch; - - dma_ch = host->dma_ch; - if (data->flags & MMC_DATA_WRITE) { - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } else { - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } - - blksz = host->data->blksz; - nblk = sg_dma_len(sgl) / blksz; - - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, - blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - omap_hsmmc_get_dma_sync_dev(host, data), - !(data->flags & MMC_DATA_WRITE)); - - omap_start_dma(dma_ch); -} - -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) +static void omap_hsmmc_dma_callback(void *param) { - struct omap_hsmmc_host *host = cb_data; + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; struct mmc_data *data; - int dma_ch, req_in_progress; - unsigned long flags; - - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", - ch_status); - return; - } + int req_in_progress; - spin_lock_irqsave(&host->irq_lock, flags); + spin_lock_irq(&host->irq_lock); if (host->dma_ch < 0) { - spin_unlock_irqrestore(&host->irq_lock, flags); + spin_unlock_irq(&host->irq_lock); return; } data = host->mrq->data; - host->dma_sg_idx++; - if (host->dma_sg_idx < host->dma_len) { - /* Fire up the next transfer. 
*/ - omap_hsmmc_config_dma_params(host, data, - data->sg + host->dma_sg_idx); - spin_unlock_irqrestore(&host->irq_lock, flags); - return; - } - + chan = omap_hsmmc_get_dma_chan(host, data); if (!data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); req_in_progress = host->req_in_progress; - dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock_irqrestore(&host->irq_lock, flags); - - omap_free_dma(dma_ch); + spin_unlock_irq(&host->irq_lock); /* If DMA has finished after TC, complete the request */ if (!req_in_progress) { @@ -1286,7 +1236,8 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, struct mmc_data *data, - struct omap_hsmmc_next *next) + struct omap_hsmmc_next *next, + struct dma_chan *chan) { int dma_len; @@ -1301,8 +1252,7 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, /* Check if next job is already prepared */ if (next || (!next && data->host_cookie != host->next_data.cookie)) { - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); } else { @@ -1329,8 +1279,11 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, i; + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *tx; + int ret = 0, i; struct mmc_data *data = req->data; + struct dma_chan *chan; /* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { @@ -1348,22 +1301,41 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, BUG_ON(host->dma_ch != -1); - ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), - "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { - dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); + chan = omap_hsmmc_get_dma_chan(host, data); + + cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) return ret; - } - ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); if (ret) return ret; - host->dma_ch = dma_ch; - host->dma_sg_idx = 0; + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; - omap_hsmmc_config_dma_params(host, data, data->sg); + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; + + dma_async_issue_pending(chan); return 0; } @@ -1445,11 +1417,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, struct omap_hsmmc_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - if (host->use_dma) { - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, - omap_hsmmc_get_dma_dir(host, data)); + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + omap_hsmmc_get_dma_dir(host, data)); data->host_cookie = 0; } } @@ -1464,10 +1436,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, return ; } - if (host->use_dma) + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, - &host->next_data)) + &host->next_data, c)) mrq->data->host_cookie = 0; + } } /* @@ -1800,6 +1775,8 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) struct resource *res; int ret, irq; const struct of_device_id *match; + dma_cap_mask_t mask; + unsigned tx_req, rx_req; match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); if (match) { @@ -1844,7 +1821,6 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; host->slot_id = 0; @@ -1934,7 +1910,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_tx = res->start; + tx_req = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); if (!res) { @@ -1942,7 +1918,24 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev) ret = -ENXIO; goto err_irq; } - host->dma_line_rx = res->start; + rx_req = res->start; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->rx_chan = dma_request_channel(mask, omap_dma_filter_fn, &rx_req); + if (!host->rx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); + ret = -ENXIO; + goto err_irq; + } + + host->tx_chan = dma_request_channel(mask, omap_dma_filter_fn, &tx_req); + if (!host->tx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); + ret = -ENXIO; + goto err_irq; + } /* Request IRQ for MMC operations */ ret = request_irq(host->irq, omap_hsmmc_irq, 0, @@ -2021,6 +2014,10 @@ err_reg: err_irq_cd_init: free_irq(host->irq, host); err_irq: + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); clk_put(host->fclk); @@ -2056,6 +2053,11 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev) if (mmc_slot(host).card_detect_irq) free_irq(mmc_slot(host).card_detect_irq, host); + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); + pm_runtime_put_sync(host->dev); pm_runtime_disable(host->dev); 
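/*
 * Each conversion in this series follows the same dmaengine slave
 * lifecycle. A minimal receive-side sketch, assuming a request-line
 * number "sig" and the omap_dma_filter_fn() used by these probe
 * paths; names like fifo_phys, done_fn and ctx are illustrative,
 * not taken from the patch:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 16,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *	struct dma_chan *c;
 *	int nents;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	c = dma_request_channel(mask, omap_dma_filter_fn, &sig);
 *	if (!c)
 *		return -ENXIO;
 *	if (dmaengine_slave_config(c, &cfg))
 *		goto release;
 *	nents = dma_map_sg(c->device->dev, sg, sg_len, DMA_FROM_DEVICE);
 *	if (!nents)
 *		goto release;
 *	tx = dmaengine_prep_slave_sg(c, sg, nents, DMA_DEV_TO_MEM,
 *				     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!tx)
 *		goto unmap;
 *	tx->callback = done_fn;
 *	tx->callback_param = ctx;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(c);
 *
 * where the unmap/release labels undo dma_map_sg() and
 * dma_request_channel() respectively. Error or teardown is the
 * mirror image: dmaengine_terminate_all(c), dma_unmap_sg(), then
 * dma_release_channel(c), as in the remove() path here.
 */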
clk_put(host->fclk); diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index d7f681d0c9b9..e9309b3659e7 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c @@ -9,6 +9,7 @@ */ #include <linux/platform_device.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/module.h> @@ -18,6 +19,7 @@ #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> +#include <linux/omap-dma.h> #include <linux/io.h> #include <linux/slab.h> @@ -123,7 +125,7 @@ struct omap_nand_info { int gpmc_cs; unsigned long phys_base; struct completion comp; - int dma_ch; + struct dma_chan *dma; int gpmc_irq; enum { OMAP_NAND_IO_READ = 0, /* read */ @@ -336,12 +338,10 @@ static void omap_write_buf_pref(struct mtd_info *mtd, } /* - * omap_nand_dma_cb: callback on the completion of dma transfer - * @lch: logical channel - * @ch_satuts: channel status + * omap_nand_dma_callback: callback on the completion of dma transfer * @data: pointer to completion data structure */ -static void omap_nand_dma_cb(int lch, u16 ch_status, void *data) +static void omap_nand_dma_callback(void *data) { complete((struct completion *) data); } @@ -358,17 +358,13 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, { struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, mtd); + struct dma_async_tx_descriptor *tx; enum dma_data_direction dir = is_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; - dma_addr_t dma_addr; - int ret; + struct scatterlist sg; unsigned long tim, limit; - - /* The fifo depth is 64 bytes max. - * But configure the FIFO-threahold to 32 to get a sync at each frame - * and frame length is 32 bytes. - */ - int buf_len = len >> 6; + unsigned n; + int ret; if (addr >= high_memory) { struct page *p1; @@ -382,40 +378,33 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, addr = page_address(p1) + ((size_t)addr & ~PAGE_MASK); } - dma_addr = dma_map_single(&info->pdev->dev, addr, len, dir); - if (dma_mapping_error(&info->pdev->dev, dma_addr)) { + sg_init_one(&sg, addr, len); + n = dma_map_sg(info->dma->device->dev, &sg, 1, dir); + if (n == 0) { dev_err(&info->pdev->dev, "Couldn't DMA map a %d byte buffer\n", len); goto out_copy; } - if (is_write) { - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_DST_SYNC); - } else { - omap_set_dma_src_params(info->dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - info->phys_base, 0, 0); - omap_set_dma_dest_params(info->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - dma_addr, 0, 0); - omap_set_dma_transfer_params(info->dma_ch, OMAP_DMA_DATA_TYPE_S32, - 0x10, buf_len, OMAP_DMA_SYNC_FRAME, - OMAP24XX_DMA_GPMC, OMAP_DMA_SRC_SYNC); - } - /* configure and start prefetch transfer */ + tx = dmaengine_prep_slave_sg(info->dma, &sg, n, + is_write ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto out_copy_unmap; + + tx->callback = omap_nand_dma_callback; + tx->callback_param = &info->comp; + dmaengine_submit(tx); + + /* configure and start prefetch transfer */ ret = gpmc_prefetch_enable(info->gpmc_cs, - PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); + PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); if (ret) /* PFPW engine is busy, use cpu copy method */ goto out_copy_unmap; init_completion(&info->comp); - - omap_start_dma(info->dma_ch); + dma_async_issue_pending(info->dma); /* setup and start DMA using dma_addr */ wait_for_completion(&info->comp); @@ -427,11 +416,11 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr, /* disable and stop the PFPW engine */ gpmc_prefetch_reset(info->gpmc_cs); - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); return 0; out_copy_unmap: - dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); + dma_unmap_sg(info->dma->device->dev, &sg, 1, dir); out_copy: if (info->nand.options & NAND_BUSWIDTH_16) is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) @@ -1164,6 +1153,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) struct omap_nand_platform_data *pdata; int err; int i, offset; + dma_cap_mask_t mask; + unsigned sig; pdata = pdev->dev.platform_data; if (pdata == NULL) { @@ -1244,18 +1235,31 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) break; case NAND_OMAP_PREFETCH_DMA: - err = omap_request_dma(OMAP24XX_DMA_GPMC, "NAND", - omap_nand_dma_cb, &info->comp, &info->dma_ch); - if (err < 0) { - info->dma_ch = -1; - dev_err(&pdev->dev, "DMA request failed!\n"); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = OMAP24XX_DMA_GPMC; + info->dma = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!info->dma) { + dev_err(&pdev->dev, "DMA engine request failed\n"); + err = -ENXIO; goto out_release_mem_region; } else { - omap_set_dma_dest_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - omap_set_dma_src_burst_mode(info->dma_ch, - OMAP_DMA_DATA_BURST_16); - + struct dma_slave_config cfg; + int rc; + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = info->phys_base; + cfg.dst_addr = info->phys_base; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = 16; + cfg.dst_maxburst = 16; + rc = dmaengine_slave_config(info->dma, &cfg); + if (rc) { + dev_err(&pdev->dev, "DMA engine slave config failed: %d\n", + rc); + goto out_release_mem_region; + } info->nand.read_buf = omap_read_buf_dma_pref; info->nand.write_buf = omap_write_buf_dma_pref; } @@ -1358,6 +1362,8 @@ static int __devinit omap_nand_probe(struct platform_device *pdev) return 0; out_release_mem_region: + if (info->dma) + dma_release_channel(info->dma); release_mem_region(info->phys_base, NAND_IO_SIZE); out_free_info: kfree(info); @@ -1373,8 +1379,8 @@ static int omap_nand_remove(struct platform_device *pdev) omap3_free_bch(&info->mtd); platform_set_drvdata(pdev, NULL); - if (info->dma_ch != -1) - omap_free_dma(info->dma_ch); + if (info->dma) + dma_release_channel(info->dma); if (info->gpmc_irq) free_irq(info->gpmc_irq, info); diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c index 57bf1d7ee80f..9ab24528f9b9 100644 --- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c +++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c @@ 
-1188,7 +1188,7 @@ exit: kfree(buf); /* close file before return */ if (fp) - filp_close(fp, current->files); + filp_close(fp, NULL); /* restore previous address limit */ set_fs(old_fs); diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 2d1e68db9b3f..e894ca7b54c0 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c @@ -4146,45 +4146,7 @@ fc_bsg_rportadd(struct Scsi_Host *shost, struct fc_rport *rport) static void fc_bsg_remove(struct request_queue *q) { - struct request *req; /* block request */ - int counts; /* totals for request_list count and starved */ - if (q) { - /* Stop taking in new requests */ - spin_lock_irq(q->queue_lock); - blk_stop_queue(q); - - /* drain all requests in the queue */ - while (1) { - /* need the lock to fetch a request - * this may fetch the same reqeust as the previous pass - */ - req = blk_fetch_request(q); - /* save requests in use and starved */ - counts = q->rq.count[0] + q->rq.count[1] + - q->rq.starved[0] + q->rq.starved[1]; - spin_unlock_irq(q->queue_lock); - /* any requests still outstanding? */ - if (counts == 0) - break; - - /* This may be the same req as the previous iteration, - * always send the blk_end_request_all after a prefetch. - * It is not okay to not end the request because the - * prefetch started the request. - */ - if (req) { - /* return -ENXIO to indicate that this queue is - * going away - */ - req->errors = -ENXIO; - blk_end_request_all(req, -ENXIO); - } - - msleep(200); /* allow bsg to possibly finish */ - spin_lock_irq(q->queue_lock); - } - bsg_unregister_queue(q); blk_cleanup_queue(q); } diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 09809d06eccb..fa1dfaa83e32 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -575,7 +575,7 @@ static int iscsi_remove_host(struct transport_container *tc, struct iscsi_cls_host *ihost = shost->shost_data; if (ihost->bsg_q) { - bsg_remove_queue(ihost->bsg_q); + bsg_unregister_queue(ihost->bsg_q); blk_cleanup_queue(ihost->bsg_q); } return 0; diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c index 7d46b15e1520..bc4778175e34 100644 --- a/drivers/spi/spi-omap2-mcspi.c +++ b/drivers/spi/spi-omap2-mcspi.c @@ -28,6 +28,8 @@ #include <linux/device.h> #include <linux/delay.h> #include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/omap-dma.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/clk.h> @@ -39,7 +41,6 @@ #include <linux/spi/spi.h> -#include <plat/dma.h> #include <plat/clock.h> #include <plat/mcspi.h> @@ -93,8 +94,8 @@ /* We have 2 DMA channels per CS, one for RX and one for TX */ struct omap2_mcspi_dma { - int dma_tx_channel; - int dma_rx_channel; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; int dma_tx_sync_dev; int dma_rx_sync_dev; @@ -300,20 +301,46 @@ static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit) return 0; } +static void omap2_mcspi_rx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_rx_completion); + + /* We must disable the DMA RX request */ + omap2_mcspi_set_dma_req(spi, 1, 0); +} + +static void omap2_mcspi_tx_callback(void *data) +{ + struct spi_device *spi = data; + struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master); + struct omap2_mcspi_dma 
*mcspi_dma = &mcspi->dma_channels[spi->chip_select]; + + complete(&mcspi_dma->dma_tx_completion); + + /* We must disable the DMA TX request */ + omap2_mcspi_set_dma_req(spi, 0, 0); +} + static unsigned omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) { struct omap2_mcspi *mcspi; struct omap2_mcspi_cs *cs = spi->controller_state; struct omap2_mcspi_dma *mcspi_dma; - unsigned int count, c; - unsigned long base, tx_reg, rx_reg; - int word_len, data_type, element_count; + unsigned int count; + int word_len, element_count; int elements = 0; u32 l; u8 * rx; const u8 * tx; void __iomem *chstat_reg; + struct dma_slave_config cfg; + enum dma_slave_buswidth width; + unsigned es; mcspi = spi_master_get_devdata(spi->master); mcspi_dma = &mcspi->dma_channels[spi->chip_select]; @@ -321,68 +348,92 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0; + if (cs->word_len <= 8) { + width = DMA_SLAVE_BUSWIDTH_1_BYTE; + es = 1; + } else if (cs->word_len <= 16) { + width = DMA_SLAVE_BUSWIDTH_2_BYTES; + es = 2; + } else { + width = DMA_SLAVE_BUSWIDTH_4_BYTES; + es = 4; + } + + memset(&cfg, 0, sizeof(cfg)); + cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0; + cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0; + cfg.src_addr_width = width; + cfg.dst_addr_width = width; + cfg.src_maxburst = 1; + cfg.dst_maxburst = 1; + + if (xfer->tx_buf && mcspi_dma->dma_tx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + + dmaengine_slave_config(mcspi_dma->dma_tx, &cfg); + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->tx_dma; + sg_dma_len(&sg) = xfer->len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_tx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? */ + } + } + + if (xfer->rx_buf && mcspi_dma->dma_rx) { + struct dma_async_tx_descriptor *tx; + struct scatterlist sg; + size_t len = xfer->len - es; + + dmaengine_slave_config(mcspi_dma->dma_rx, &cfg); + + if (l & OMAP2_MCSPI_CHCONF_TURBO) + len -= es; + + sg_init_table(&sg, 1); + sg_dma_address(&sg) = xfer->rx_dma; + sg_dma_len(&sg) = len; + + tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (tx) { + tx->callback = omap2_mcspi_rx_callback; + tx->callback_param = spi; + dmaengine_submit(tx); + } else { + /* FIXME: fall back to PIO? 
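 * A NULL return here means dmaengine_prep_slave_sg() queued nothing,
 * so there is no descriptor to terminate; a fallback would only need
 * to skip dma_async_issue_pending() and push the words through the
 * FIFO by CPU. A sketch only (not implemented by this patch),
 * reusing the driver's existing PIO path:
 *
 *	if (!tx)
 *		return omap2_mcspi_txrx_pio(spi, xfer);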
*/ + } + } + count = xfer->len; - c = count; word_len = cs->word_len; - base = cs->phys; - tx_reg = base + OMAP2_MCSPI_TX0; - rx_reg = base + OMAP2_MCSPI_RX0; rx = xfer->rx_buf; tx = xfer->tx_buf; if (word_len <= 8) { - data_type = OMAP_DMA_DATA_TYPE_S8; element_count = count; } else if (word_len <= 16) { - data_type = OMAP_DMA_DATA_TYPE_S16; element_count = count >> 1; } else /* word_len <= 32 */ { - data_type = OMAP_DMA_DATA_TYPE_S32; element_count = count >> 2; } if (tx != NULL) { - omap_set_dma_transfer_params(mcspi_dma->dma_tx_channel, - data_type, element_count, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_tx_sync_dev, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - tx_reg, 0, 0); - - omap_set_dma_src_params(mcspi_dma->dma_tx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->tx_dma, 0, 0); - } - - if (rx != NULL) { - elements = element_count - 1; - if (l & OMAP2_MCSPI_CHCONF_TURBO) - elements--; - - omap_set_dma_transfer_params(mcspi_dma->dma_rx_channel, - data_type, elements, 1, - OMAP_DMA_SYNC_ELEMENT, - mcspi_dma->dma_rx_sync_dev, 1); - - omap_set_dma_src_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_CONSTANT, - rx_reg, 0, 0); - - omap_set_dma_dest_params(mcspi_dma->dma_rx_channel, 0, - OMAP_DMA_AMODE_POST_INC, - xfer->rx_dma, 0, 0); - } - - if (tx != NULL) { - omap_start_dma(mcspi_dma->dma_tx_channel); + dma_async_issue_pending(mcspi_dma->dma_tx); omap2_mcspi_set_dma_req(spi, 0, 1); } if (rx != NULL) { - omap_start_dma(mcspi_dma->dma_rx_channel); + dma_async_issue_pending(mcspi_dma->dma_rx); omap2_mcspi_set_dma_req(spi, 1, 1); } @@ -408,7 +459,10 @@ omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer) DMA_FROM_DEVICE); omap2_mcspi_set_enable(spi, 0); + elements = element_count - 1; + if (l & OMAP2_MCSPI_CHCONF_TURBO) { + elements--; if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0) & OMAP2_MCSPI_CHSTAT_RXS)) { @@ -725,64 +779,38 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi, return 0; } -static void omap2_mcspi_dma_rx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_rx_completion); - - /* We must disable the DMA RX request */ - omap2_mcspi_set_dma_req(spi, 1, 0); -} - -static void omap2_mcspi_dma_tx_callback(int lch, u16 ch_status, void *data) -{ - struct spi_device *spi = data; - struct omap2_mcspi *mcspi; - struct omap2_mcspi_dma *mcspi_dma; - - mcspi = spi_master_get_devdata(spi->master); - mcspi_dma = &(mcspi->dma_channels[spi->chip_select]); - - complete(&mcspi_dma->dma_tx_completion); - - /* We must disable the DMA TX request */ - omap2_mcspi_set_dma_req(spi, 0, 0); -} - static int omap2_mcspi_request_dma(struct spi_device *spi) { struct spi_master *master = spi->master; struct omap2_mcspi *mcspi; struct omap2_mcspi_dma *mcspi_dma; + dma_cap_mask_t mask; + unsigned sig; mcspi = spi_master_get_devdata(master); mcspi_dma = mcspi->dma_channels + spi->chip_select; - if (omap_request_dma(mcspi_dma->dma_rx_sync_dev, "McSPI RX", - omap2_mcspi_dma_rx_callback, spi, - &mcspi_dma->dma_rx_channel)) { - dev_err(&spi->dev, "no RX DMA channel for McSPI\n"); + init_completion(&mcspi_dma->dma_rx_completion); + init_completion(&mcspi_dma->dma_tx_completion); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + sig = mcspi_dma->dma_rx_sync_dev; + mcspi_dma->dma_rx = 
dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_rx) { + dev_err(&spi->dev, "no RX DMA engine channel for McSPI\n"); return -EAGAIN; } - if (omap_request_dma(mcspi_dma->dma_tx_sync_dev, "McSPI TX", - omap2_mcspi_dma_tx_callback, spi, - &mcspi_dma->dma_tx_channel)) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; - dev_err(&spi->dev, "no TX DMA channel for McSPI\n"); + sig = mcspi_dma->dma_tx_sync_dev; + mcspi_dma->dma_tx = dma_request_channel(mask, omap_dma_filter_fn, &sig); + if (!mcspi_dma->dma_tx) { + dev_err(&spi->dev, "no TX DMA engine channel for McSPI\n"); + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; return -EAGAIN; } - init_completion(&mcspi_dma->dma_rx_completion); - init_completion(&mcspi_dma->dma_tx_completion); - return 0; } @@ -814,8 +842,7 @@ static int omap2_mcspi_setup(struct spi_device *spi) list_add_tail(&cs->node, &ctx->cs); } - if (mcspi_dma->dma_rx_channel == -1 - || mcspi_dma->dma_tx_channel == -1) { + if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) { ret = omap2_mcspi_request_dma(spi); if (ret < 0) return ret; @@ -850,13 +877,13 @@ static void omap2_mcspi_cleanup(struct spi_device *spi) if (spi->chip_select < spi->master->num_chipselect) { mcspi_dma = &mcspi->dma_channels[spi->chip_select]; - if (mcspi_dma->dma_rx_channel != -1) { - omap_free_dma(mcspi_dma->dma_rx_channel); - mcspi_dma->dma_rx_channel = -1; + if (mcspi_dma->dma_rx) { + dma_release_channel(mcspi_dma->dma_rx); + mcspi_dma->dma_rx = NULL; } - if (mcspi_dma->dma_tx_channel != -1) { - omap_free_dma(mcspi_dma->dma_tx_channel); - mcspi_dma->dma_tx_channel = -1; + if (mcspi_dma->dma_tx) { + dma_release_channel(mcspi_dma->dma_tx); + mcspi_dma->dma_tx = NULL; } } } @@ -1176,7 +1203,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_rx_channel = -1; mcspi->dma_channels[i].dma_rx_sync_dev = dma_res->start; sprintf(dma_ch_name, "tx%d", i); dma_res = platform_get_resource_byname(pdev, IORESOURCE_DMA, @@ -1187,7 +1213,6 @@ static int __devinit omap2_mcspi_probe(struct platform_device *pdev) break; } - mcspi->dma_channels[i].dma_tx_channel = -1; mcspi->dma_channels[i].dma_tx_sync_dev = dma_res->start; } diff --git a/drivers/staging/bcm/Misc.c b/drivers/staging/bcm/Misc.c index 9a60d4cd2184..f545716c666d 100644 --- a/drivers/staging/bcm/Misc.c +++ b/drivers/staging/bcm/Misc.c @@ -157,12 +157,7 @@ static int create_worker_threads(struct bcm_mini_adapter *psAdapter) static struct file *open_firmware_file(struct bcm_mini_adapter *Adapter, const char *path) { - struct file *flp = NULL; - mm_segment_t oldfs; - oldfs = get_fs(); - set_fs(get_ds()); - flp = filp_open(path, O_RDONLY, S_IRWXU); - set_fs(oldfs); + struct file *flp = filp_open(path, O_RDONLY, S_IRWXU); if (IS_ERR(flp)) { pr_err(DRV_NAME "Unable To Open File %s, err %ld", path, PTR_ERR(flp)); flp = NULL; @@ -183,14 +178,12 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u { int errorno = 0; struct file *flp = NULL; - mm_segment_t oldfs; struct timeval tv = {0}; flp = open_firmware_file(Adapter, path); if (!flp) { - errorno = -ENOENT; BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Unable to Open %s\n", path); - goto exit_download; + return -ENOENT; } BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Opened file is = %s and length =0x%lx to be downloaded at =0x%x", path, (unsigned long)flp->f_dentry->d_inode->i_size, loc); do_gettimeofday(&tv); @@ -201,10 
+194,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u errorno = -EIO; goto exit_download; } - oldfs = get_fs(); - set_fs(get_ds()); vfs_llseek(flp, 0, 0); - set_fs(oldfs); if (Adapter->bcm_file_readback_from_chip(Adapter->pvInterfaceAdapter, flp, loc)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Failed to read back firmware!"); errorno = -EIO; @@ -212,12 +202,7 @@ static int BcmFileDownload(struct bcm_mini_adapter *Adapter, const char *path, u } exit_download: - oldfs = get_fs(); - set_fs(get_ds()); - if (flp && !(IS_ERR(flp))) - filp_close(flp, current->files); - set_fs(oldfs); - + filp_close(flp, NULL); return errorno; } @@ -1056,10 +1041,8 @@ OUT: static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) { struct file *flp = NULL; - mm_segment_t oldfs = {0}; char *buff; int len = 0; - loff_t pos = 0; buff = kmalloc(BUFFER_1K, GFP_KERNEL); if (!buff) @@ -1079,20 +1062,16 @@ static int bcm_parse_target_params(struct bcm_mini_adapter *Adapter) Adapter->pstargetparams = NULL; return -ENOENT; } - oldfs = get_fs(); - set_fs(get_ds()); - len = vfs_read(flp, (void __user __force *)buff, BUFFER_1K, &pos); - set_fs(oldfs); + len = kernel_read(flp, 0, buff, BUFFER_1K); + filp_close(flp, NULL); if (len != sizeof(STARGETPARAMS)) { BCM_DEBUG_PRINT(Adapter, DBG_TYPE_INITEXIT, MP_INIT, DBG_LVL_ALL, "Mismatch in Target Param Structure!\n"); kfree(buff); kfree(Adapter->pstargetparams); Adapter->pstargetparams = NULL; - filp_close(flp, current->files); return -ENOENT; } - filp_close(flp, current->files); /* Check for autolink in config params */ /* diff --git a/drivers/staging/gdm72xx/sdio_boot.c b/drivers/staging/gdm72xx/sdio_boot.c index 760efee23d4a..65624bca8b3a 100644 --- a/drivers/staging/gdm72xx/sdio_boot.c +++ b/drivers/staging/gdm72xx/sdio_boot.c @@ -66,9 +66,8 @@ static int download_image(struct sdio_func *func, char *img_name) return -ENOENT; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -123,7 +122,7 @@ static int download_image(struct sdio_func *func, char *img_name) pno++; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); return ret; } diff --git a/drivers/staging/gdm72xx/usb_boot.c b/drivers/staging/gdm72xx/usb_boot.c index fef290c38db6..e3dbd5a552ca 100644 --- a/drivers/staging/gdm72xx/usb_boot.c +++ b/drivers/staging/gdm72xx/usb_boot.c @@ -173,14 +173,12 @@ int usb_boot(struct usb_device *usbdev, u16 pid) filp = filp_open(img_name, O_RDONLY | O_LARGEFILE, 0); if (IS_ERR(filp)) { printk(KERN_ERR "Can't find %s.\n", img_name); - set_fs(fs); ret = PTR_ERR(filp); goto restore_fs; } - if (filp->f_dentry) - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { + inode = filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { printk(KERN_ERR "Invalid file type: %s\n", img_name); ret = -EINVAL; goto out; @@ -262,7 +260,7 @@ int usb_boot(struct usb_device *usbdev, u16 pid) ret = -EINVAL; } out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); @@ -322,13 +320,11 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto restore_fs; } - if (filp->f_dentry) { - inode = filp->f_dentry->d_inode; - if (!inode || !S_ISREG(inode->i_mode)) { - printk(KERN_ERR "Invalid file type: %s\n", path); - ret = -EINVAL; - goto out; - } + inode = 
filp->f_dentry->d_inode; + if (!S_ISREG(inode->i_mode)) { + printk(KERN_ERR "Invalid file type: %s\n", path); + ret = -EINVAL; + goto out; } buf = kmalloc(DOWNLOAD_CHUCK + pad_size, GFP_KERNEL); @@ -364,7 +360,7 @@ static int em_download_image(struct usb_device *usbdev, char *path, goto out; out: - filp_close(filp, current->files); + filp_close(filp, NULL); restore_fs: set_fs(fs); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 9e2100551c78..cbb5aaf3e567 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -109,46 +109,29 @@ static struct se_device *fd_create_virtdevice( struct se_subsystem_dev *se_dev, void *p) { - char *dev_p = NULL; struct se_device *dev; struct se_dev_limits dev_limits; struct queue_limits *limits; struct fd_dev *fd_dev = p; struct fd_host *fd_host = hba->hba_ptr; - mm_segment_t old_fs; struct file *file; struct inode *inode = NULL; int dev_flags = 0, flags, ret = -EINVAL; memset(&dev_limits, 0, sizeof(struct se_dev_limits)); - old_fs = get_fs(); - set_fs(get_ds()); - dev_p = getname(fd_dev->fd_dev_name); - set_fs(old_fs); - - if (IS_ERR(dev_p)) { - pr_err("getname(%s) failed: %lu\n", - fd_dev->fd_dev_name, IS_ERR(dev_p)); - ret = PTR_ERR(dev_p); - goto fail; - } /* * Use O_DSYNC by default instead of O_SYNC to forgo syncing * of pure timestamp updates. */ flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC; - file = filp_open(dev_p, flags, 0600); + file = filp_open(fd_dev->fd_dev_name, flags, 0600); if (IS_ERR(file)) { - pr_err("filp_open(%s) failed\n", dev_p); + pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name); ret = PTR_ERR(file); goto fail; } - if (!file || !file->f_dentry) { - pr_err("filp_open(%s) failed\n", dev_p); - goto fail; - } fd_dev->fd_file = file; /* * If using a block backend with this struct file, we extract @@ -212,14 +195,12 @@ static struct se_device *fd_create_virtdevice( " %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id, fd_dev->fd_dev_name, fd_dev->fd_dev_size); - putname(dev_p); return dev; fail: if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } - putname(dev_p); return ERR_PTR(ret); } @@ -452,14 +433,11 @@ static ssize_t fd_set_configfs_dev_params( token = match_token(ptr, tokens, args); switch (token) { case Opt_fd_dev_name: - arg_p = match_strdup(&args[0]); - if (!arg_p) { - ret = -ENOMEM; + if (match_strlcpy(fd_dev->fd_dev_name, &args[0], + FD_MAX_DEV_NAME) == 0) { + ret = -EINVAL; break; } - snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME, - "%s", arg_p); - kfree(arg_p); pr_debug("FILEIO: Referencing Path: %s\n", fd_dev->fd_dev_name); fd_dev->fbd_flags |= FBDF_HAS_PATH; diff --git a/drivers/usb/gadget/storage_common.c b/drivers/usb/gadget/storage_common.c index ae8b18869b8c..8d9bcd8207c8 100644 --- a/drivers/usb/gadget/storage_common.c +++ b/drivers/usb/gadget/storage_common.c @@ -656,9 +656,8 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (!(filp->f_mode & FMODE_WRITE)) ro = 1; - if (filp->f_path.dentry) - inode = filp->f_path.dentry->d_inode; - if (!inode || (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { + inode = filp->f_path.dentry->d_inode; + if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) { LINFO(curlun, "invalid file type: %s\n", filename); goto out; } @@ -667,7 +666,7 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) * If we can't read the file, it's no good. * If we can't write the file, use it read-only. 
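 * With get_file() dropped below, the reference that filp_open()
 * returned is the one the LUN keeps. A sketch of the resulting
 * ownership, assuming the unchanged context stores the file in
 * curlun->filp on success:
 *
 *	filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
 *	if (IS_ERR(filp))
 *		return PTR_ERR(filp);
 *	(validate inode type, readability, size...)
 *	curlun->filp = filp;	keep filp_open()'s reference
 *	return 0;
 * out:
 *	fput(filp);		any failure drops that same reference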
*/ - if (!filp->f_op || !(filp->f_op->read || filp->f_op->aio_read)) { + if (!(filp->f_op->read || filp->f_op->aio_read)) { LINFO(curlun, "file not readable: %s\n", filename); goto out; } @@ -712,7 +711,6 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) if (fsg_lun_is_open(curlun)) fsg_lun_close(curlun); - get_file(filp); curlun->blksize = blksize; curlun->blkbits = blkbits; curlun->ro = ro; @@ -720,10 +718,10 @@ static int fsg_lun_open(struct fsg_lun *curlun, const char *filename) curlun->file_length = size; curlun->num_sectors = num_sectors; LDBG(curlun, "open backing file: %s\n", filename); - rc = 0; + return 0; out: - filp_close(filp, current->files); + fput(filp); return rc; } diff --git a/drivers/usb/gadget/u_uac1.c b/drivers/usb/gadget/u_uac1.c index af9898982059..e0c5e88e03ed 100644 --- a/drivers/usb/gadget/u_uac1.c +++ b/drivers/usb/gadget/u_uac1.c @@ -275,17 +275,17 @@ static int gaudio_close_snd_dev(struct gaudio *gau) /* Close control device */ snd = &gau->control; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM playback device and setup substream */ snd = &gau->playback; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); /* Close PCM capture device and setup substream */ snd = &gau->capture; if (snd->filp) - filp_close(snd->filp, current->files); + filp_close(snd->filp, NULL); return 0; } diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c index b0b2ac335347..747442d2c0f6 100644 --- a/drivers/video/aty/aty128fb.c +++ b/drivers/video/aty/aty128fb.c @@ -90,7 +90,8 @@ #undef DEBUG #ifdef DEBUG -#define DBG(fmt, args...) printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); +#define DBG(fmt, args...) \ + printk(KERN_DEBUG "aty128fb: %s " fmt, __func__, ##args); #else #define DBG(fmt, args...) #endif @@ -449,8 +450,9 @@ static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par); #if 0 static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, - void __iomem *bios); -static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, const struct aty128fb_par *par); + void __iomem *bios); +static void __devinit __iomem *aty128_map_ROM(struct pci_dev *pdev, + const struct aty128fb_par *par); #endif static void aty128_timings(struct aty128fb_par *par); static void aty128_init_engine(struct aty128fb_par *par); @@ -779,7 +781,8 @@ static u32 depth_to_dst(u32 depth) #ifndef __sparc__ -static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, struct pci_dev *dev) +static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, + struct pci_dev *dev) { u16 dptr; u8 rom_type; @@ -811,13 +814,14 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s /* Look for the PCI data to check the ROM type */ dptr = BIOS_IN16(0x18); - /* Check the PCI data signature. If it's wrong, we still assume a normal x86 ROM - * for now, until I've verified this works everywhere. The goal here is more - * to phase out Open Firmware images. + /* Check the PCI data signature. If it's wrong, we still assume a normal + * x86 ROM for now, until I've verified this works everywhere. + * The goal here is more to phase out Open Firmware images. 
* - * Currently, we only look at the first PCI data, we could iteratre and deal with - * them all, and we should use fb_bios_start relative to start of image and not - * relative start of ROM, but so far, I never found a dual-image ATI card + * Currently, we only look at the first PCI data, we could iterate and + * deal with them all, and we should use fb_bios_start relative to start + * of image and not relative start of ROM, but so far, I never found a + * dual-image ATI card. * * typedef struct { * u32 signature; + 0x00 @@ -852,7 +856,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s printk(KERN_INFO "aty128fb: Found HP PA-RISC ROM Image\n"); goto failed; default: - printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", rom_type); + printk(KERN_INFO "aty128fb: Found unknown type %d ROM Image\n", + rom_type); goto failed; } anyway: @@ -863,7 +868,8 @@ static void __iomem * __devinit aty128_map_ROM(const struct aty128fb_par *par, s return NULL; } -static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, unsigned char __iomem *bios) +static void __devinit aty128_get_pllinfo(struct aty128fb_par *par, + unsigned char __iomem *bios) { unsigned int bios_hdr; unsigned int bios_pll; @@ -1247,10 +1253,13 @@ static int aty128_crtc_to_var(const struct aty128_crtc *crtc, static void aty128_set_crt_enable(struct aty128fb_par *par, int on) { if (on) { - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | CRT_CRTC_ON); - aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | DAC_PALETTE2_SNOOP_EN)); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) | + CRT_CRTC_ON); + aty_st_le32(DAC_CNTL, (aty_ld_le32(DAC_CNTL) | + DAC_PALETTE2_SNOOP_EN)); } else - aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & ~CRT_CRTC_ON); + aty_st_le32(CRTC_EXT_CNTL, aty_ld_le32(CRTC_EXT_CNTL) & + ~CRT_CRTC_ON); } static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) @@ -1281,7 +1290,8 @@ static void aty128_set_lcd_enable(struct aty128fb_par *par, int on) } } -static void aty128_set_pll(struct aty128_pll *pll, const struct aty128fb_par *par) +static void aty128_set_pll(struct aty128_pll *pll, + const struct aty128fb_par *par) { u32 div3; @@ -1366,7 +1376,8 @@ static int aty128_var_to_pll(u32 period_in_ps, struct aty128_pll *pll, } -static int aty128_pll_to_var(const struct aty128_pll *pll, struct fb_var_screeninfo *var) +static int aty128_pll_to_var(const struct aty128_pll *pll, + struct fb_var_screeninfo *var) { var->pixclock = 100000000 / pll->vclk; @@ -1512,7 +1523,8 @@ static int aty128fb_set_par(struct fb_info *info) * encode/decode the User Defined Part of the Display */ -static int aty128_decode_var(struct fb_var_screeninfo *var, struct aty128fb_par *par) +static int aty128_decode_var(struct fb_var_screeninfo *var, + struct aty128fb_par *par) { int err; struct aty128_crtc crtc; @@ -1559,7 +1571,8 @@ static int aty128_encode_var(struct fb_var_screeninfo *var, } -static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int aty128fb_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct aty128fb_par par; int err; @@ -1575,7 +1588,8 @@ static int aty128fb_check_var(struct fb_var_screeninfo *var, struct fb_info *inf /* * Pan or Wrap the Display */ -static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *fb) +static int aty128fb_pan_display(struct fb_var_screeninfo *var, + struct fb_info *fb) { struct aty128fb_par *par = fb->par; u32 xoffset, yoffset; @@ -1594,7 +1608,8 @@
static int aty128fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *f par->crtc.xoffset = xoffset; par->crtc.yoffset = yoffset; - offset = ((yoffset * par->crtc.vxres + xoffset)*(par->crtc.bpp >> 3)) & ~7; + offset = ((yoffset * par->crtc.vxres + xoffset) * (par->crtc.bpp >> 3)) + & ~7; if (par->crtc.bpp == 24) offset += 8 * (offset % 3); /* Must be multiple of 8 and 3 */ @@ -1620,11 +1635,13 @@ static void aty128_st_pal(u_int regno, u_int red, u_int green, u_int blue, * do mirroring */ - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) | + DAC_PALETTE_ACCESS_CNTL); aty_st_8(PALETTE_INDEX, regno); aty_st_le32(PALETTE_DATA, (red<<16)|(green<<8)|blue); #endif - aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & ~DAC_PALETTE_ACCESS_CNTL); + aty_st_le32(DAC_CNTL, aty_ld_le32(DAC_CNTL) & + ~DAC_PALETTE_ACCESS_CNTL); } aty_st_8(PALETTE_INDEX, regno); @@ -1753,7 +1770,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) aty_st_le32(LVDS_GEN_CNTL, reg); } reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, level) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, level) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_ON | LVDS_EN; reg &= ~LVDS_DISPLAY_DIS; @@ -1764,7 +1782,8 @@ static int aty128_bl_update_status(struct backlight_device *bd) #endif } else { reg &= ~LVDS_BL_MOD_LEVEL_MASK; - reg |= (aty128_bl_get_level_brightness(par, 0) << LVDS_BL_MOD_LEVEL_SHIFT); + reg |= (aty128_bl_get_level_brightness(par, 0) << + LVDS_BL_MOD_LEVEL_SHIFT); #ifdef BACKLIGHT_LVDS_OFF reg |= LVDS_DISPLAY_DIS; aty_st_le32(LVDS_GEN_CNTL, reg); @@ -1869,7 +1888,8 @@ static void aty128_early_resume(void *data) } #endif /* CONFIG_PPC_PMAC */ -static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_init(struct pci_dev *pdev, + const struct pci_device_id *ent) { struct fb_info *info = pci_get_drvdata(pdev); struct aty128fb_par *par = info->par; @@ -1887,7 +1907,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* range check to make sure */ if (ent->driver_data < ARRAY_SIZE(r128_family)) - strlcat(video_card, r128_family[ent->driver_data], sizeof(video_card)); + strlcat(video_card, r128_family[ent->driver_data], + sizeof(video_card)); printk(KERN_INFO "aty128fb: %s [chip rev 0x%x] ", video_card, chip_rev); @@ -1911,11 +1932,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i /* Indicate sleep capability */ if (par->chip_gen == rage_M3) { pmac_call_feature(PMAC_FTR_DEVICE_CAN_WAKE, NULL, 0, 1); -#if 0 /* Disable the early video resume hack for now as it's causing problems, among - * others we now rely on the PCI core restoring the config space for us, which - * isn't the case with that hack, and that code path causes various things to - * be called with interrupts off while they shouldn't. I'm leaving the code in - * as it can be useful for debugging purposes +#if 0 /* Disable the early video resume hack for now as it's causing problems, + * among others we now rely on the PCI core restoring the config space + * for us, which isn't the case with that hack, and that code path causes + * various things to be called with interrupts off while they shouldn't. 
+ * I'm leaving the code in as it can be useful for debugging purposes */ pmac_set_early_video_resume(aty128_early_resume, par); #endif @@ -1953,11 +1974,11 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i default_vmode = VMODE_1152_768_60; if (default_cmode > 16) - default_cmode = CMODE_32; + default_cmode = CMODE_32; else if (default_cmode > 8) - default_cmode = CMODE_16; + default_cmode = CMODE_16; else - default_cmode = CMODE_8; + default_cmode = CMODE_8; if (mac_vmode_to_var(default_vmode, default_cmode, &var)) var = default_var; @@ -2018,7 +2039,8 @@ static int __devinit aty128_init(struct pci_dev *pdev, const struct pci_device_i #ifdef CONFIG_PCI /* register a card ++ajoshi */ -static int __devinit aty128_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +static int __devinit aty128_probe(struct pci_dev *pdev, + const struct pci_device_id *ent) { unsigned long fb_addr, reg_addr; struct aty128fb_par *par; @@ -2318,39 +2340,39 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, u_int width, u_int height, struct fb_info_aty128 *par) { - u32 save_dp_datatype, save_dp_cntl, dstval; - - if (!width || !height) - return; - - dstval = depth_to_dst(par->current_par.crtc.depth); - if (dstval == DST_24BPP) { - srcx *= 3; - dstx *= 3; - width *= 3; - } else if (dstval == -EINVAL) { - printk("aty128fb: invalid depth or RGBA\n"); - return; - } - - wait_for_fifo(2, par); - save_dp_datatype = aty_ld_le32(DP_DATATYPE); - save_dp_cntl = aty_ld_le32(DP_CNTL); - - wait_for_fifo(6, par); - aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); - aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); - aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); - aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); - - aty_st_le32(DST_Y_X, (dsty << 16) | dstx); - aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); - - par->blitter_may_be_busy = 1; - - wait_for_fifo(2, par); - aty_st_le32(DP_DATATYPE, save_dp_datatype); - aty_st_le32(DP_CNTL, save_dp_cntl); + u32 save_dp_datatype, save_dp_cntl, dstval; + + if (!width || !height) + return; + + dstval = depth_to_dst(par->current_par.crtc.depth); + if (dstval == DST_24BPP) { + srcx *= 3; + dstx *= 3; + width *= 3; + } else if (dstval == -EINVAL) { + printk("aty128fb: invalid depth or RGBA\n"); + return; + } + + wait_for_fifo(2, par); + save_dp_datatype = aty_ld_le32(DP_DATATYPE); + save_dp_cntl = aty_ld_le32(DP_CNTL); + + wait_for_fifo(6, par); + aty_st_le32(SRC_Y_X, (srcy << 16) | srcx); + aty_st_le32(DP_MIX, ROP3_SRCCOPY | DP_SRC_RECT); + aty_st_le32(DP_CNTL, DST_X_LEFT_TO_RIGHT | DST_Y_TOP_TO_BOTTOM); + aty_st_le32(DP_DATATYPE, save_dp_datatype | dstval | SRC_DSTCOLOR); + + aty_st_le32(DST_Y_X, (dsty << 16) | dstx); + aty_st_le32(DST_HEIGHT_WIDTH, (height << 16) | width); + + par->blitter_may_be_busy = 1; + + wait_for_fifo(2, par); + aty_st_le32(DP_DATATYPE, save_dp_datatype); + aty_st_le32(DP_CNTL, save_dp_cntl); } @@ -2358,17 +2380,17 @@ static inline void aty128_rectcopy(int srcx, int srcy, int dstx, int dsty, * Text mode accelerated functions */ -static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, int dx, - int height, int width) +static void fbcon_aty128_bmove(struct display *p, int sy, int sx, int dy, + int dx, int height, int width) { - sx *= fontwidth(p); - sy *= fontheight(p); - dx *= fontwidth(p); - dy *= fontheight(p); - width *= fontwidth(p); - height *= fontheight(p); - - aty128_rectcopy(sx, sy, dx, dy, width, height, + sx *= fontwidth(p); + sy 
*= fontheight(p); + dx *= fontwidth(p); + dy *= fontheight(p); + width *= fontwidth(p); + height *= fontheight(p); + + aty128_rectcopy(sx, sy, dx, dy, width, height, (struct fb_info_aty128 *)p->fb_info); } #endif /* 0 */ diff --git a/drivers/video/da8xx-fb.c b/drivers/video/da8xx-fb.c index 47118c75a4c0..7ae9d53f2bf1 100644 --- a/drivers/video/da8xx-fb.c +++ b/drivers/video/da8xx-fb.c @@ -30,7 +30,10 @@ #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/console.h> +#include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/delay.h> +#include <linux/lcm.h> #include <video/da8xx-fb.h> #include <asm/div64.h> @@ -160,6 +163,13 @@ struct da8xx_fb_par { wait_queue_head_t vsync_wait; int vsync_flag; int vsync_timeout; + spinlock_t lock_for_chan_update; + + /* + * LCDC has 2 ping pong DMA channels, channel 0 + * and channel 1. + */ + unsigned int which_dma_channel_done; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; unsigned int lcd_fck_rate; @@ -260,10 +270,18 @@ static inline void lcd_enable_raster(void) { u32 reg; + /* Put LCDC in reset for several cycles */ + if (lcd_revision == LCD_VERSION_2) + /* Write 1 to reset LCDC */ + lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); + mdelay(1); + /* Bring LCDC out of reset */ if (lcd_revision == LCD_VERSION_2) lcdc_write(0, LCD_CLK_RESET_REG); + mdelay(1); + /* Above reset sequence does not reset register context */ reg = lcdc_read(LCD_RASTER_CTRL_REG); if (!(reg & LCD_RASTER_ENABLE)) lcdc_write(reg | LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); @@ -277,10 +295,6 @@ static inline void lcd_disable_raster(void) reg = lcdc_read(LCD_RASTER_CTRL_REG); if (reg & LCD_RASTER_ENABLE) lcdc_write(reg & ~LCD_RASTER_ENABLE, LCD_RASTER_CTRL_REG); - - if (lcd_revision == LCD_VERSION_2) - /* Write 1 to reset LCDC */ - lcdc_write(LCD_CLK_MAIN_RESET, LCD_CLK_RESET_REG); } static void lcd_blit(int load_mode, struct da8xx_fb_par *par) @@ -344,8 +358,8 @@ static void lcd_blit(int load_mode, struct da8xx_fb_par *par) lcd_enable_raster(); } -/* Configure the Burst Size of DMA */ -static int lcd_cfg_dma(int burst_size) +/* Configure the Burst Size and fifo threshold of DMA */ +static int lcd_cfg_dma(int burst_size, int fifo_th) { u32 reg; @@ -369,6 +383,9 @@ static int lcd_cfg_dma(int burst_size) default: return -EINVAL; } + + reg |= (fifo_th << 8); + lcdc_write(reg, LCD_DMA_CTRL_REG); return 0; @@ -670,8 +687,8 @@ static int lcd_init(struct da8xx_fb_par *par, const struct lcd_ctrl_config *cfg, lcdc_write((lcdc_read(LCD_RASTER_TIMING_2_REG) & ~LCD_INVERT_PIXEL_CLOCK), LCD_RASTER_TIMING_2_REG); - /* Configure the DMA burst size. */ - ret = lcd_cfg_dma(cfg->dma_burst_sz); + /* Configure the DMA burst size and fifo threshold. 
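
The lcd_cfg_dma() change above folds a FIFO threshold into the LCD_DMA_CTRL_REG value alongside the burst size. A rough stand-alone sketch of that packing follows; the fifo_th << 8 shift comes from the hunk, while the burst-size encoding below is a made-up placeholder for the driver's real switch statement.

/* Sketch, not part of the patch: pack burst size and FIFO threshold
 * into one DMA control register image. */
#include <stdio.h>
#include <stdint.h>

#define FAKE_BURST_SIZE_SHIFT	4	/* assumption, not the real layout */
#define FIFO_TH_SHIFT		8	/* from the hunk: reg |= fifo_th << 8 */

static int cfg_dma(int burst_size_code, int fifo_th, uint32_t *reg)
{
	if (burst_size_code < 0 || burst_size_code > 4)
		return -1;			/* mirrors the -EINVAL path */
	*reg = (uint32_t)burst_size_code << FAKE_BURST_SIZE_SHIFT;
	*reg |= (uint32_t)fifo_th << FIFO_TH_SHIFT;
	return 0;
}

int main(void)
{
	uint32_t reg;

	if (cfg_dma(2, 6, &reg) == 0)
		printf("LCD_DMA_CTRL_REG image: 0x%08x\n", (unsigned int)reg);
	return 0;
}
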
*/ + ret = lcd_cfg_dma(cfg->dma_burst_sz, cfg->fifo_th); if (ret < 0) return ret; @@ -715,7 +732,6 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) { struct da8xx_fb_par *par = arg; u32 stat = lcdc_read(LCD_MASKED_STAT_REG); - u32 reg_int; if ((stat & LCD_SYNC_LOST) && (stat & LCD_FIFO_UNDERFLOW)) { lcd_disable_raster(); @@ -732,10 +748,8 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); - /* Disable PL completion inerrupt */ - reg_int = lcdc_read(LCD_INT_ENABLE_CLR_REG) | - (LCD_V2_PL_INT_ENA); - lcdc_write(reg_int, LCD_INT_ENABLE_CLR_REG); + /* Disable PL completion interrupt */ + lcdc_write(LCD_V2_PL_INT_ENA, LCD_INT_ENABLE_CLR_REG); /* Setup and start data loading mode */ lcd_blit(LOAD_DATA, par); @@ -743,6 +757,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) lcdc_write(stat, LCD_MASKED_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -752,6 +767,7 @@ static irqreturn_t lcdc_irq_handler_rev02(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -798,6 +814,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) lcdc_write(stat, LCD_STAT_REG); if (stat & LCD_END_OF_FRAME0) { + par->which_dma_channel_done = 0; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); lcdc_write(par->dma_end, @@ -807,6 +824,7 @@ static irqreturn_t lcdc_irq_handler_rev01(int irq, void *arg) } if (stat & LCD_END_OF_FRAME1) { + par->which_dma_channel_done = 1; lcdc_write(par->dma_start, LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); lcdc_write(par->dma_end, @@ -1021,11 +1039,14 @@ static int cfb_blank(int blank, struct fb_info *info) par->blank = blank; switch (blank) { case FB_BLANK_UNBLANK: + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - - lcd_enable_raster(); break; + case FB_BLANK_NORMAL: + case FB_BLANK_VSYNC_SUSPEND: + case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_POWERDOWN: if (par->panel_power_ctrl) par->panel_power_ctrl(0); @@ -1052,6 +1073,7 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix = &fbi->fix; unsigned int end; unsigned int start; + unsigned long irq_flags; if (var->xoffset != fbi->var.xoffset || var->yoffset != fbi->var.yoffset) { @@ -1069,6 +1091,21 @@ static int da8xx_pan_display(struct fb_var_screeninfo *var, end = start + fbi->var.yres * fix->line_length - 1; par->dma_start = start; par->dma_end = end; + spin_lock_irqsave(&par->lock_for_chan_update, + irq_flags); + if (par->which_dma_channel_done == 0) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_0_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_0_REG); + } else if (par->which_dma_channel_done == 1) { + lcdc_write(par->dma_start, + LCD_DMA_FRM_BUF_BASE_ADDR_1_REG); + lcdc_write(par->dma_end, + LCD_DMA_FRM_BUF_CEILING_ADDR_1_REG); + } + spin_unlock_irqrestore(&par->lock_for_chan_update, + irq_flags); } } @@ -1114,6 +1151,7 @@ static int __devinit fb_probe(struct platform_device *device) struct da8xx_fb_par *par; resource_size_t len; int ret, i; + unsigned long ulcm; if (fb_pdata == NULL) { dev_err(&device->dev, "Can not get platform data\n"); @@ -1209,7 +1247,8 @@ static int __devinit fb_probe(struct platform_device *device) /* allocate frame buffer */ par->vram_size = lcdc_info->width * lcdc_info->height * 
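
The pan-display hunk above reprograms whichever ping-pong channel last signalled end-of-frame, so the new scanout address takes effect on the very next frame. Below is a small sketch of that selection logic; regs[] is a fake register file standing in for the LCDC base/ceiling registers, and the spinlock is reduced to a comment.

/* Sketch, not part of the patch: which_done mimics
 * par->which_dma_channel_done as set by the EOF0/EOF1 IRQ paths. */
#include <stdio.h>

enum { BASE0, CEIL0, BASE1, CEIL1, NREGS };
static unsigned long regs[NREGS];

static void pan_update(int which_done, unsigned long start, unsigned long end)
{
	/* in the driver this runs under lock_for_chan_update, IRQs off */
	if (which_done == 0) {
		regs[BASE0] = start;	/* channel 0 just finished */
		regs[CEIL0] = end;
	} else if (which_done == 1) {
		regs[BASE1] = start;	/* channel 1 just finished */
		regs[CEIL1] = end;
	}
}

int main(void)
{
	pan_update(1, 0x80001000UL, 0x8004ffffUL);
	printf("ch1: %#lx..%#lx\n", regs[BASE1], regs[CEIL1]);
	return 0;
}
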
lcd_cfg->bpp; - par->vram_size = PAGE_ALIGN(par->vram_size/8); + ulcm = lcm((lcdc_info->width * lcd_cfg->bpp)/8, PAGE_SIZE); + par->vram_size = roundup(par->vram_size/8, ulcm); par->vram_size = par->vram_size * LCD_NUM_BUFFERS; par->vram_virt = dma_alloc_coherent(NULL, @@ -1296,6 +1335,8 @@ static int __devinit fb_probe(struct platform_device *device) /* initialize the vsync wait queue */ init_waitqueue_head(&par->vsync_wait); par->vsync_timeout = HZ / 5; + par->which_dma_channel_done = -1; + spin_lock_init(&par->lock_for_chan_update); /* Register the Frame Buffer */ if (register_framebuffer(da8xx_fb_info) < 0) { @@ -1382,11 +1423,12 @@ static int fb_resume(struct platform_device *dev) struct da8xx_fb_par *par = info->par; console_lock(); + clk_enable(par->lcdc_clk); + lcd_enable_raster(); + if (par->panel_power_ctrl) par->panel_power_ctrl(1); - clk_enable(par->lcdc_clk); - lcd_enable_raster(); fb_set_suspend(info, 0); console_unlock(); diff --git a/drivers/video/epson1355fb.c b/drivers/video/epson1355fb.c index a268cbf1cbea..68b9b511ce80 100644 --- a/drivers/video/epson1355fb.c +++ b/drivers/video/epson1355fb.c @@ -477,11 +477,11 @@ static __init unsigned int get_fb_size(struct fb_info *info) return size; } -static int epson1355_width_tab[2][4] __initdata = +static int epson1355_width_tab[2][4] __devinitdata = { {4, 8, 16, -1}, {9, 12, 16, -1} }; -static int epson1355_bpp_tab[8] __initdata = { 1, 2, 4, 8, 15, 16 }; +static int epson1355_bpp_tab[8] __devinitdata = { 1, 2, 4, 8, 15, 16 }; -static void __init fetch_hw_state(struct fb_info *info, struct epson1355_par *par) +static void __devinit fetch_hw_state(struct fb_info *info, struct epson1355_par *par) { struct fb_var_screeninfo *var = &info->var; struct fb_fix_screeninfo *fix = &info->fix; @@ -601,7 +601,7 @@ static int epson1355fb_remove(struct platform_device *dev) return 0; } -int __devinit epson1355fb_probe(struct platform_device *dev) +static int __devinit epson1355fb_probe(struct platform_device *dev) { struct epson1355_par *default_par; struct fb_info *info; diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c index a36b2d28280e..c6c016a506ce 100644 --- a/drivers/video/exynos/exynos_dp_core.c +++ b/drivers/video/exynos/exynos_dp_core.c @@ -47,7 +47,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) exynos_dp_init_hpd(dp); - udelay(200); + usleep_range(200, 210); while (exynos_dp_get_plug_in_status(dp) != 0) { timeout_loop++; @@ -55,7 +55,7 @@ static int exynos_dp_detect_hpd(struct exynos_dp_device *dp) dev_err(dp->dev, "failed to get hpd plug status\n"); return -ETIMEDOUT; } - udelay(10); + usleep_range(10, 11); } return 0; @@ -304,7 +304,7 @@ static void exynos_dp_link_start(struct exynos_dp_device *dp) buf[lane] = DPCD_PRE_EMPHASIS_PATTERN2_LEVEL0 | DPCD_VOLTAGE_SWING_PATTERN1_LEVEL0; exynos_dp_write_bytes_to_dpcd(dp, - DPCD_ADDR_TRAINING_PATTERN_SET, + DPCD_ADDR_TRAINING_LANE0_SET, lane_count, buf); } @@ -336,7 +336,7 @@ static int exynos_dp_channel_eq_ok(u8 link_status[6], int lane_count) u8 lane_status; lane_align = link_status[2]; - if ((lane_align == DPCD_INTERLANE_ALIGN_DONE) == 0) + if ((lane_align & DPCD_INTERLANE_ALIGN_DONE) == 0) return -EINVAL; for (lane = 0; lane < lane_count; lane++) { @@ -407,6 +407,9 @@ static unsigned int exynos_dp_get_lane_link_training( case 3: reg = exynos_dp_get_lane3_link_training(dp); break; + default: + WARN_ON(1); + return 0; } return reg; @@ -483,7 +486,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) u8 
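
The new vram sizing above rounds a single frame up to a multiple of lcm(line_length, PAGE_SIZE), so each of the flip buffers starts both page-aligned and line-aligned. A sketch with user-space stand-ins for the kernel's lcm() and roundup(); the mode numbers are illustrative.

/* Sketch, not part of the patch. */
#include <stdio.h>

static unsigned long gcd(unsigned long a, unsigned long b)
{
	while (b) {
		unsigned long t = a % b;
		a = b;
		b = t;
	}
	return a;
}

static unsigned long lcm(unsigned long a, unsigned long b)
{
	return a / gcd(a, b) * b;
}

#define PAGE_SIZE	4096UL
#define LCD_NUM_BUFFERS	2

int main(void)
{
	unsigned long width = 800, height = 480, bpp = 16;
	unsigned long bits = width * height * bpp;
	unsigned long ulcm = lcm(width * bpp / 8, PAGE_SIZE);
	unsigned long frame = (bits / 8 + ulcm - 1) / ulcm * ulcm; /* roundup */

	printf("vram_size = %lu\n", frame * LCD_NUM_BUFFERS);
	return 0;
}
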
pre_emphasis; u8 training_lane; - udelay(100); + usleep_range(100, 101); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -501,7 +504,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp) buf[0] = DPCD_SCRAMBLING_DISABLED | DPCD_TRAINING_PATTERN_2; exynos_dp_write_byte_to_dpcd(dp, - DPCD_ADDR_TRAINING_LANE0_SET, + DPCD_ADDR_TRAINING_PATTERN_SET, buf[0]); for (lane = 0; lane < lane_count; lane++) { @@ -568,7 +571,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp) u8 adjust_request[2]; - udelay(400); + usleep_range(400, 401); exynos_dp_read_bytes_from_dpcd(dp, DPCD_ADDR_LANE0_1_STATUS, 6, link_status); @@ -736,7 +739,7 @@ static int exynos_dp_set_link_train(struct exynos_dp_device *dp, if (retval == 0) break; - udelay(100); + usleep_range(100, 110); } return retval; @@ -770,7 +773,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - udelay(1); + usleep_range(1, 2); } /* Set to use the register calculated M/N video */ @@ -804,7 +807,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp, return -ETIMEDOUT; } - mdelay(1); + usleep_range(1000, 1001); } if (retval != 0) diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h index 1e0f998e0c9f..8526e548c385 100644 --- a/drivers/video/exynos/exynos_dp_core.h +++ b/drivers/video/exynos/exynos_dp_core.h @@ -85,10 +85,6 @@ void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); -void exynos_dp_set_link_bandwidth(struct exynos_dp_device *dp, u32 bwtype); -void exynos_dp_get_link_bandwidth(struct exynos_dp_device *dp, u32 *bwtype); -void exynos_dp_set_lane_count(struct exynos_dp_device *dp, u32 count); -void exynos_dp_get_lane_count(struct exynos_dp_device *dp, u32 *count); void exynos_dp_enable_enhanced_mode(struct exynos_dp_device *dp, bool enable); void exynos_dp_set_training_pattern(struct exynos_dp_device *dp, enum pattern_set pattern); diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c index bcb0e3ae1e9d..2db5b9aa250a 100644 --- a/drivers/video/exynos/exynos_dp_reg.c +++ b/drivers/video/exynos/exynos_dp_reg.c @@ -122,7 +122,7 @@ void exynos_dp_reset(struct exynos_dp_device *dp) LS_CLK_DOMAIN_FUNC_EN_N; writel(reg, dp->reg_base + EXYNOS_DP_FUNC_EN_2); - udelay(20); + usleep_range(20, 30); exynos_dp_lane_swap(dp, 0); @@ -988,7 +988,7 @@ void exynos_dp_reset_macro(struct exynos_dp_device *dp) writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); /* 10 us is the minimum reset time. */ - udelay(10); + usleep_range(10, 20); reg &= ~MACRO_RST; writel(reg, dp->reg_base + EXYNOS_DP_PHY_TEST); diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c index 9908e75ae761..4bc2b8a5dd8b 100644 --- a/drivers/video/exynos/exynos_mipi_dsi.c +++ b/drivers/video/exynos/exynos_mipi_dsi.c @@ -154,7 +154,7 @@ static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) if (client_drv && client_drv->power_on) client_drv->power_on(client_dev, 1); - exynos_mipi_regulator_disable(dsim); + exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. 
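
The exynos_dp_channel_eq_ok() fix above (== replaced by &) is a classic status-register bug: the sketch below shows how the equality test misfires as soon as any unrelated status bit is also set. The flag value here is an assumed placeholder, not the real DPCD encoding.

/* Sketch, not part of the patch. */
#include <stdio.h>

#define INTERLANE_ALIGN_DONE	0x01	/* assumed bit position */

static int eq_ok_wrong(unsigned char lane_align)
{
	return lane_align == INTERLANE_ALIGN_DONE;	/* old, buggy test */
}

static int eq_ok_fixed(unsigned char lane_align)
{
	return (lane_align & INTERLANE_ALIGN_DONE) != 0; /* patched test */
}

int main(void)
{
	unsigned char status = 0x81;	/* align done plus an unrelated bit */

	printf("wrong: %d, fixed: %d\n",
	       eq_ok_wrong(status), eq_ok_fixed(status));
	return 0;
}
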
*/ if (dsim->pd->phy_enable) diff --git a/drivers/video/exynos/s6e8ax0.h b/drivers/video/exynos/s6e8ax0.h deleted file mode 100644 index 1f1b270484b0..000000000000 --- a/drivers/video/exynos/s6e8ax0.h +++ /dev/null @@ -1,21 +0,0 @@ -/* linux/drivers/video/backlight/s6e8ax0.h - * - * MIPI-DSI based s6e8ax0 AMOLED LCD Panel definitions. - * - * Copyright (c) 2011 Samsung Electronics - * - * Inki Dae, <inki.dae@samsung.com> - * Donghwa Lee <dh09.lee@samsung.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. -*/ - -#ifndef _S6E8AX0_H -#define _S6E8AX0_H - -extern void s6e8ax0_init(void); - -#endif - diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c index 1ddeb11659d4..64cda560c488 100644 --- a/drivers/video/fb_defio.c +++ b/drivers/video/fb_defio.c @@ -104,6 +104,8 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma, deferred framebuffer IO. then if userspace touches a page again, we repeat the same scheme */ + file_update_time(vma->vm_file); + /* protect against the workqueue changing the page list */ mutex_lock(&fbdefio->lock); diff --git a/drivers/video/fb_draw.h b/drivers/video/fb_draw.h index 04c01faaf772..624ee115f129 100644 --- a/drivers/video/fb_draw.h +++ b/drivers/video/fb_draw.h @@ -3,6 +3,7 @@ #include <asm/types.h> #include <linux/fb.h> +#include <linux/bug.h> /* * Compose two values, using a bitmask as decision value @@ -41,7 +42,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x0000000100000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #else @@ -66,7 +68,8 @@ pixel_to_pat( u32 bpp, u32 pixel) case 32: return 0x00000001ul*pixel; default: - panic("pixel_to_pat(): unsupported pixelformat\n"); + WARN(1, "pixel_to_pat(): unsupported pixelformat %d\n", bpp); + return 0; } } #endif diff --git a/drivers/video/grvga.c b/drivers/video/grvga.c index da066c210923..5245f9a71892 100644 --- a/drivers/video/grvga.c +++ b/drivers/video/grvga.c @@ -354,7 +354,7 @@ static int __devinit grvga_probe(struct platform_device *dev) */ if (fb_get_options("grvga", &options)) { retval = -ENODEV; - goto err; + goto free_fb; } if (!options || !*options) @@ -370,7 +370,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (grvga_parse_custom(this_opt, &info->var) < 0) { dev_err(&dev->dev, "Failed to parse custom mode (%s).\n", this_opt); retval = -EINVAL; - goto err1; + goto free_fb; } } else if (!strncmp(this_opt, "addr", 4)) grvga_fix_addr = simple_strtoul(this_opt + 5, NULL, 16); @@ -387,10 +387,11 @@ static int __devinit grvga_probe(struct platform_device *dev) info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_YPAN; info->fix.smem_len = grvga_mem_size; - if (!request_mem_region(dev->resource[0].start, resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { + if (!devm_request_mem_region(&dev->dev, dev->resource[0].start, + resource_size(&dev->resource[0]), "grlib-svgactrl regs")) { dev_err(&dev->dev, "registers already mapped\n"); retval = -EBUSY; - goto err; + goto free_fb; } par->regs = of_ioremap(&dev->resource[0], 0, @@ -400,14 +401,14 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!par->regs) { dev_err(&dev->dev, "failed to map registers\n"); retval = -ENOMEM; - goto err1; + goto free_fb; } retval = fb_alloc_cmap(&info->cmap, 256, 0); if (retval < 0) { 
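
The fb_draw.h hunk above only swaps panic() for WARN()-and-return-0, but the surrounding multiply trick deserves a note: multiplying a pixel by a constant that has a 1 every bpp bits replicates it across the whole pattern word. A sketch covering a subset of the depths (the driver handles more).

/* Sketch, not part of the patch. */
#include <stdio.h>
#include <stdint.h>

static uint64_t pixel_to_pat64(unsigned int bpp, uint64_t pixel)
{
	switch (bpp) {
	case 8:
		return 0x0101010101010101ull * pixel;
	case 16:
		return 0x0001000100010001ull * pixel;
	case 32:
		return 0x0000000100000001ull * pixel;
	default:
		fprintf(stderr, "unsupported pixelformat %u\n", bpp);
		return 0;	/* mirrors the WARN-and-return-0 behaviour */
	}
}

int main(void)
{
	printf("%016llx\n", (unsigned long long)pixel_to_pat64(16, 0xbeef));
	return 0;
}
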
dev_err(&dev->dev, "failed to allocate mem with fb_alloc_cmap\n"); retval = -ENOMEM; - goto err2; + goto unmap_regs; } if (mode_opt) { @@ -415,7 +416,7 @@ static int __devinit grvga_probe(struct platform_device *dev) grvga_modedb, sizeof(grvga_modedb), &grvga_modedb[0], 8); if (!retval || retval == 4) { retval = -EINVAL; - goto err3; + goto dealloc_cmap; } } @@ -427,10 +428,11 @@ static int __devinit grvga_probe(struct platform_device *dev) physical_start = grvga_fix_addr; - if (!request_mem_region(physical_start, grvga_mem_size, dev->name)) { + if (!devm_request_mem_region(&dev->dev, physical_start, + grvga_mem_size, dev->name)) { dev_err(&dev->dev, "failed to request memory region\n"); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } virtual_start = (unsigned long) ioremap(physical_start, grvga_mem_size); @@ -438,7 +440,7 @@ static int __devinit grvga_probe(struct platform_device *dev) if (!virtual_start) { dev_err(&dev->dev, "error mapping framebuffer memory\n"); retval = -ENOMEM; - goto err4; + goto dealloc_cmap; } } else { /* Allocate frambuffer memory */ @@ -451,7 +453,7 @@ static int __devinit grvga_probe(struct platform_device *dev) "unable to allocate framebuffer memory (%lu bytes)\n", grvga_mem_size); retval = -ENOMEM; - goto err3; + goto dealloc_cmap; } physical_start = dma_map_single(&dev->dev, (void *)virtual_start, grvga_mem_size, DMA_TO_DEVICE); @@ -484,7 +486,7 @@ static int __devinit grvga_probe(struct platform_device *dev) retval = register_framebuffer(info); if (retval < 0) { dev_err(&dev->dev, "failed to register framebuffer\n"); - goto err4; + goto free_mem; } __raw_writel(physical_start, &par->regs->fb_pos); @@ -493,21 +495,18 @@ static int __devinit grvga_probe(struct platform_device *dev) return 0; -err4: +free_mem: dev_set_drvdata(&dev->dev, NULL); - if (grvga_fix_addr) { - release_mem_region(physical_start, grvga_mem_size); + if (grvga_fix_addr) iounmap((void *)virtual_start); - } else + else kfree((void *)virtual_start); -err3: +dealloc_cmap: fb_dealloc_cmap(&info->cmap); -err2: +unmap_regs: of_iounmap(&dev->resource[0], par->regs, resource_size(&dev->resource[0])); -err1: - release_mem_region(dev->resource[0].start, resource_size(&dev->resource[0])); -err: +free_fb: framebuffer_release(info); return retval; @@ -524,12 +523,10 @@ static int __devexit grvga_remove(struct platform_device *device) of_iounmap(&device->resource[0], par->regs, resource_size(&device->resource[0])); - release_mem_region(device->resource[0].start, resource_size(&device->resource[0])); - if (!par->fb_alloced) { - release_mem_region(info->fix.smem_start, info->fix.smem_len); + if (!par->fb_alloced) iounmap(info->screen_base); - } else + else kfree((void *)info->screen_base); framebuffer_release(info); diff --git a/drivers/video/mx3fb.c b/drivers/video/mx3fb.c index eec0d7b748eb..c89f8a8d36d2 100644 --- a/drivers/video/mx3fb.c +++ b/drivers/video/mx3fb.c @@ -269,7 +269,7 @@ struct mx3fb_info { dma_cookie_t cookie; struct scatterlist sg[2]; - u32 sync; /* preserve var->sync flags */ + struct fb_var_screeninfo cur_var; /* current var info */ }; static void mx3fb_dma_done(void *); @@ -698,9 +698,29 @@ static void mx3fb_dma_done(void *arg) complete(&mx3_fbi->flip_cmpl); } +static bool mx3fb_must_set_par(struct fb_info *fbi) +{ + struct mx3fb_info *mx3_fbi = fbi->par; + struct fb_var_screeninfo old_var = mx3_fbi->cur_var; + struct fb_var_screeninfo new_var = fbi->var; + + if ((fbi->var.activate & FB_ACTIVATE_FORCE) && + (fbi->var.activate & FB_ACTIVATE_MASK) == FB_ACTIVATE_NOW) + return 
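
The grvga error paths above are renamed after the action each label performs (free_fb, unmap_regs, dealloc_cmap) and lose their release_mem_region() calls, which devm_request_mem_region() now makes unnecessary. A generic, runnable sketch of that unwind style, with malloc/free standing in for the driver's resources.

/* Sketch, not part of the patch: labels named for what they undo. */
#include <stdio.h>
#include <stdlib.h>

static int probe_like(void)
{
	char *fb, *cmap;
	int retval = 0;

	fb = malloc(64);		/* framebuffer_alloc() stand-in */
	if (!fb)
		return -1;

	cmap = malloc(64);		/* fb_alloc_cmap() stand-in */
	if (!cmap) {
		retval = -1;
		goto free_fb;
	}

	if (getenv("FAIL_REGISTER")) {	/* register_framebuffer() failing */
		retval = -1;
		goto dealloc_cmap;
	}

	puts("probe ok");
	/* the real driver keeps both resources until remove() */
	free(cmap);
	free(fb);
	return 0;

dealloc_cmap:
	free(cmap);
free_fb:
	free(fb);
	return retval;
}

int main(void)
{
	return probe_like() ? 1 : 0;
}
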
true; + + /* + * Ignore xoffset and yoffset update, + * because pan display handles this case. + */ + old_var.xoffset = new_var.xoffset; + old_var.yoffset = new_var.yoffset; + + return !!memcmp(&old_var, &new_var, sizeof(struct fb_var_screeninfo)); +} + static int __set_par(struct fb_info *fbi, bool lock) { - u32 mem_len; + u32 mem_len, cur_xoffset, cur_yoffset; struct ipu_di_signal_cfg sig_cfg; enum ipu_panel mode = IPU_PANEL_TFT; struct mx3fb_info *mx3_fbi = fbi->par; @@ -780,8 +800,25 @@ static int __set_par(struct fb_info *fbi, bool lock) video->out_height = fbi->var.yres; video->out_stride = fbi->var.xres_virtual; - if (mx3_fbi->blank == FB_BLANK_UNBLANK) + if (mx3_fbi->blank == FB_BLANK_UNBLANK) { sdc_enable_channel(mx3_fbi); + /* + * sg[0] points to fb smem_start address + * and is actually active in controller. + */ + mx3_fbi->cur_var.xoffset = 0; + mx3_fbi->cur_var.yoffset = 0; + } + + /* + * Preserve xoffset and yoffset in case they are + * inactive in controller as fb is blanked. + */ + cur_xoffset = mx3_fbi->cur_var.xoffset; + cur_yoffset = mx3_fbi->cur_var.yoffset; + mx3_fbi->cur_var = fbi->var; + mx3_fbi->cur_var.xoffset = cur_xoffset; + mx3_fbi->cur_var.yoffset = cur_yoffset; return 0; } @@ -802,7 +839,7 @@ static int mx3fb_set_par(struct fb_info *fbi) mutex_lock(&mx3_fbi->mutex); - ret = __set_par(fbi, true); + ret = mx3fb_must_set_par(fbi) ? __set_par(fbi, true) : 0; mutex_unlock(&mx3_fbi->mutex); @@ -901,8 +938,8 @@ static int mx3fb_check_var(struct fb_var_screeninfo *var, struct fb_info *fbi) var->grayscale = 0; /* Preserve sync flags */ - var->sync |= mx3_fbi->sync; - mx3_fbi->sync |= var->sync; + var->sync |= mx3_fbi->cur_var.sync; + mx3_fbi->cur_var.sync |= var->sync; return 0; } @@ -1043,8 +1080,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, return -EINVAL; } - if (fbi->var.xoffset == var->xoffset && - fbi->var.yoffset == var->yoffset) + if (mx3_fbi->cur_var.xoffset == var->xoffset && + mx3_fbi->cur_var.yoffset == var->yoffset) return 0; /* No change, do nothing */ y_bottom = var->yoffset; @@ -1127,6 +1164,8 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var, else fbi->var.vmode &= ~FB_VMODE_YWRAP; + mx3_fbi->cur_var = fbi->var; + mutex_unlock(&mx3_fbi->mutex); dev_dbg(fbi->device, "Update complete\n"); diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c index ad741c3d1ae1..eaeed4340e04 100644 --- a/drivers/video/omap2/displays/panel-acx565akm.c +++ b/drivers/video/omap2/displays/panel-acx565akm.c @@ -487,6 +487,13 @@ static struct omap_video_timings acx_panel_timings = { .vfp = 3, .vsw = 3, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int acx_panel_probe(struct omap_dss_device *dssdev) @@ -498,8 +505,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; dev_dbg(&dssdev->dev, "%s\n", __func__); - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; + /* FIXME AC bias ? 
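
mx3fb_must_set_par() above decides whether a full mode set is needed by memcmp()ing the cached and requested var after neutralising the pan fields. A reduced, runnable sketch; struct var is a stripped-down stand-in for fb_var_screeninfo, chosen without padding so memcmp is well-defined.

/* Sketch, not part of the patch. */
#include <stdio.h>
#include <string.h>

struct var {
	unsigned int xres, yres;
	unsigned int xoffset, yoffset;
	unsigned int bits_per_pixel;
};

static int must_set_par(const struct var *cur, const struct var *req)
{
	struct var old = *cur;

	old.xoffset = req->xoffset;	/* pan is handled by pan_display */
	old.yoffset = req->yoffset;
	return memcmp(&old, req, sizeof(old)) != 0;
}

int main(void)
{
	struct var cur = { 1024, 768, 0, 0, 16 };
	struct var req = cur;

	req.yoffset = 768;		/* pure pan: no reprogram needed */
	printf("pan only -> %d\n", must_set_par(&cur, &req));

	req.bits_per_pixel = 32;	/* real mode change */
	printf("bpp change -> %d\n", must_set_par(&cur, &req));
	return 0;
}
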
*/ dssdev->panel.timings = acx_panel_timings; diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c index e42f9dc22123..bc5af2500eb9 100644 --- a/drivers/video/omap2/displays/panel-generic-dpi.c +++ b/drivers/video/omap2/displays/panel-generic-dpi.c @@ -40,12 +40,6 @@ struct panel_config { struct omap_video_timings timings; - int acbi; /* ac-bias pin transitions per interrupt */ - /* Unit: line clocks */ - int acb; /* ac-bias pin frequency */ - - enum omap_panel_config config; - int power_on_delay; int power_off_delay; @@ -73,11 +67,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 11, .vfp = 3, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_lq", @@ -98,11 +94,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 50, .power_off_delay = 100, .name = "sharp_ls", @@ -123,12 +121,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC | - OMAP_DSS_LCD_ONOFF, .power_on_delay = 0, .power_off_delay = 0, .name = "toppoly_tdo35s", @@ -149,11 +148,13 @@ static struct panel_config generic_dpi_panels[] = { .vfp = 4, .vsw = 10, .vbp = 12 - 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "samsung_lte430wq_f0c", @@ -174,11 +175,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 4, .vbp = 11, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "seiko_70wvw1tz3", @@ -199,11 +202,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = 
OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, .power_on_delay = 0, .power_off_delay = 0, .name = "powertip_ph480272t", @@ -224,11 +229,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 12, .vbp = 25, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x28, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .power_on_delay = 0, .power_off_delay = 0, .name = "innolux_at070tn83", @@ -249,9 +256,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 2, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "nec_nl2432dr22-11b", }, @@ -270,9 +281,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 1, .vbp = 1, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "h4", }, @@ -291,10 +306,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 2, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "apollon", }, /* FocalTech ETM070003DH6 */ @@ -312,9 +330,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 3, .vfp = 13, .vbp = 29, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS, .name = "focaltech_etm070003dh6", }, @@ -333,11 +355,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 23, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .acbi = 0x0, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .power_on_delay = 0, .power_off_delay = 0, .name = "microtips_umsh_8173md", @@ -358,9 +382,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 4, .vbp = 2, - }, - .config = OMAP_DSS_LCD_TFT, + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "ortustech_com43h4m10xtc", }, @@ -379,11 +407,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 10, .vfp = 12, .vbp = 23, - }, - .acb = 0x0, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IEO, + 
.vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_LOW, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, + }, .name = "innolux_at080tn52", }, @@ -401,8 +431,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 1, .vfp = 26, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "mitsubishi_aa084sb01", }, /* EDT ET0500G0DH6 */ @@ -419,8 +454,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 35, .vbp = 10, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT, .name = "edt_et0500g0dh6", }, @@ -439,9 +479,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd050vl1", }, @@ -460,9 +504,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 2, .vfp = 10, .vbp = 33, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pm070wl4", }, @@ -481,9 +529,13 @@ static struct panel_config generic_dpi_panels[] = { .vsw = 4, .vfp = 1, .vbp = 23, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }, - .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC, .name = "primeview_pd104slf", }, }; @@ -573,10 +625,7 @@ static int generic_dpi_panel_probe(struct omap_dss_device *dssdev) if (!panel_config) return -EINVAL; - dssdev->panel.config = panel_config->config; dssdev->panel.timings = panel_config->timings; - dssdev->panel.acb = panel_config->acb; - dssdev->panel.acbi = panel_config->acbi; drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL); if (!drv_data) diff --git a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c index 0841cc2b3f77..802807798846 100644 --- a/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c +++ b/drivers/video/omap2/displays/panel-lgphilips-lb035q02.c @@ -40,6 +40,12 @@ static struct omap_video_timings lb035q02_timings = { .vsw = 2, .vfp = 4, .vbp = 18, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int lb035q02_panel_power_on(struct omap_dss_device *dssdev) @@ -82,8 +88,6 @@ static int 
lb035q02_panel_probe(struct omap_dss_device *dssdev) struct lb035q02_data *ld; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; dssdev->panel.timings = lb035q02_timings; ld = kzalloc(sizeof(*ld), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c index 4a34cdc1371b..e6c115373c00 100644 --- a/drivers/video/omap2/displays/panel-n8x0.c +++ b/drivers/video/omap2/displays/panel-n8x0.c @@ -473,7 +473,6 @@ static int n8x0_panel_probe(struct omap_dss_device *dssdev) mutex_init(&ddata->lock); - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings.x_res = 800; dssdev->panel.timings.y_res = 480; dssdev->ctrl.pixel_size = 16; diff --git a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c index 8b38b39213f4..b122b0f31c43 100644 --- a/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c +++ b/drivers/video/omap2/displays/panel-nec-nl8048hl11-01b.c @@ -76,6 +76,12 @@ static struct omap_video_timings nec_8048_panel_timings = { .vfp = 3, .vsw = 1, .vbp = 4, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, }; static int nec_8048_bl_update_status(struct backlight_device *bl) @@ -116,9 +122,6 @@ static int nec_8048_panel_probe(struct omap_dss_device *dssdev) struct backlight_properties props; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_RF | - OMAP_DSS_LCD_ONOFF; dssdev->panel.timings = nec_8048_panel_timings; necd = kzalloc(sizeof(*necd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-picodlp.c b/drivers/video/omap2/displays/panel-picodlp.c index 98ebdaddab5a..2d35bd388860 100644 --- a/drivers/video/omap2/displays/panel-picodlp.c +++ b/drivers/video/omap2/displays/panel-picodlp.c @@ -69,6 +69,12 @@ static struct omap_video_timings pico_ls_timings = { .vsw = 2, .vfp = 3, .vbp = 14, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, }; static inline struct picodlp_panel_data @@ -414,9 +420,6 @@ static int picodlp_panel_probe(struct omap_dss_device *dssdev) struct i2c_client *picodlp_i2c_client; int r = 0, picodlp_adapter_id; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_ONOFF | - OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IVS; - dssdev->panel.acb = 0x0; dssdev->panel.timings = pico_ls_timings; picod = kzalloc(sizeof(struct picodlp_data), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c index ba38b3ad17d6..bd86ba9ccf76 100644 --- a/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c +++ b/drivers/video/omap2/displays/panel-sharp-ls037v7dw01.c @@ -44,6 +44,12 @@ static struct omap_video_timings sharp_ls_timings = { .vsw = 1, .vfp = 1, .vbp = 1, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int sharp_ls_bl_update_status(struct backlight_device *bl) @@ -86,9 +92,6 @@ static int sharp_ls_panel_probe(struct omap_dss_device *dssdev) struct 
sharp_data *sd; int r; - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS | - OMAP_DSS_LCD_IHS; - dssdev->panel.acb = 0x28; dssdev->panel.timings = sharp_ls_timings; sd = kzalloc(sizeof(*sd), GFP_KERNEL); diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c index 901576eb5a84..3f5acc7771da 100644 --- a/drivers/video/omap2/displays/panel-taal.c +++ b/drivers/video/omap2/displays/panel-taal.c @@ -882,7 +882,6 @@ static int taal_probe(struct omap_dss_device *dssdev) goto err; } - dssdev->panel.config = OMAP_DSS_LCD_TFT; dssdev->panel.timings = panel_config->timings; dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888; diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c index bff306e041ca..40cc0cfa5d17 100644 --- a/drivers/video/omap2/displays/panel-tfp410.c +++ b/drivers/video/omap2/displays/panel-tfp410.c @@ -39,6 +39,12 @@ static const struct omap_video_timings tfp410_default_timings = { .vfp = 3, .vsw = 4, .vbp = 7, + + .vsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .hsync_level = OMAPDSS_SIG_ACTIVE_HIGH, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; struct panel_drv_data { @@ -95,7 +101,6 @@ static int tfp410_probe(struct omap_dss_device *dssdev) return -ENOMEM; dssdev->panel.timings = tfp410_default_timings; - dssdev->panel.config = OMAP_DSS_LCD_TFT; ddata->dssdev = dssdev; mutex_init(&ddata->lock); diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c index 4b6448b3c31f..fa7baa650ae0 100644 --- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c +++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c @@ -267,6 +267,12 @@ static const struct omap_video_timings tpo_td043_timings = { .vsw = 1, .vfp = 39, .vbp = 34, + + .vsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .hsync_level = OMAPDSS_SIG_ACTIVE_LOW, + .data_pclk_edge = OMAPDSS_DRIVE_SIG_FALLING_EDGE, + .de_level = OMAPDSS_SIG_ACTIVE_HIGH, + .sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES, }; static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) @@ -423,8 +429,6 @@ static int tpo_td043_probe(struct omap_dss_device *dssdev) return -ENODEV; } - dssdev->panel.config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IHS | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IPC; dssdev->panel.timings = tpo_td043_timings; dssdev->ctrl.pixel_size = 24; diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig index 43324e5ed25f..b337a8469fd8 100644 --- a/drivers/video/omap2/dss/Kconfig +++ b/drivers/video/omap2/dss/Kconfig @@ -52,7 +52,7 @@ config OMAP2_DSS_RFBI DBI is a bus between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DBI spesifications. + See http://www.mipi.org/ for DBI specifications. config OMAP2_DSS_VENC bool "VENC support" @@ -92,7 +92,7 @@ config OMAP2_DSS_DSI DSI is a high speed half-duplex serial interface between the host processor and a peripheral, such as a display or a framebuffer chip. - See http://www.mipi.org/ for DSI spesifications. + See http://www.mipi.org/ for DSI specifications. 
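
All the panel conversions above replace the OMAP_DSS_LCD_* config bits with explicit signal-level and edge fields in omap_video_timings. The correspondence the hunks imply (IVS/IHS mapping to active-low syncs, IPC to falling-edge data, IEO to active-low DE) can be written down directly; the enum and flag values below are illustrative stand-ins, not the omapdss definitions.

/* Sketch, not part of the patch. */
#include <stdio.h>

enum sig_level { SIG_ACTIVE_HIGH, SIG_ACTIVE_LOW };
enum sig_edge  { EDGE_RISING, EDGE_FALLING, EDGE_OPPOSITE };

#define LCD_IVS	(1 << 0)	/* invert vsync */
#define LCD_IHS	(1 << 1)	/* invert hsync */
#define LCD_IPC	(1 << 2)	/* invert pixel clock */
#define LCD_IEO	(1 << 3)	/* invert data enable */

struct signal_cfg {
	enum sig_level vsync_level, hsync_level, de_level;
	enum sig_edge data_pclk_edge;
};

static struct signal_cfg from_flags(unsigned int cfg)
{
	struct signal_cfg s;

	s.vsync_level = (cfg & LCD_IVS) ? SIG_ACTIVE_LOW : SIG_ACTIVE_HIGH;
	s.hsync_level = (cfg & LCD_IHS) ? SIG_ACTIVE_LOW : SIG_ACTIVE_HIGH;
	s.de_level = (cfg & LCD_IEO) ? SIG_ACTIVE_LOW : SIG_ACTIVE_HIGH;
	s.data_pclk_edge = (cfg & LCD_IPC) ? EDGE_FALLING : EDGE_RISING;
	return s;
}

int main(void)
{
	struct signal_cfg s = from_flags(LCD_IVS | LCD_IHS); /* e.g. sharp_ls */

	printf("vsync %s, hsync %s, de %s, data on %s edge\n",
	       s.vsync_level == SIG_ACTIVE_LOW ? "low" : "high",
	       s.hsync_level == SIG_ACTIVE_LOW ? "low" : "high",
	       s.de_level == SIG_ACTIVE_LOW ? "low" : "high",
	       s.data_pclk_edge == EDGE_FALLING ? "falling" : "rising");
	return 0;
}
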
config OMAP2_DSS_MIN_FCK_PER_PCK int "Minimum FCK/PCK ratio (for scaling)" diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c index ab22cc224f3e..0fefc68372b9 100644 --- a/drivers/video/omap2/dss/apply.c +++ b/drivers/video/omap2/dss/apply.c @@ -104,6 +104,7 @@ struct mgr_priv_data { bool shadow_extra_info_dirty; struct omap_video_timings timings; + struct dss_lcd_mgr_config lcd_config; }; static struct { @@ -137,6 +138,7 @@ static struct mgr_priv_data *get_mgr_priv(struct omap_overlay_manager *mgr) void dss_apply_init(void) { const int num_ovls = dss_feat_get_num_ovls(); + struct mgr_priv_data *mp; int i; spin_lock_init(&data_lock); @@ -168,16 +170,35 @@ void dss_apply_init(void) op->user_info = op->info; } + + /* + * Initialize some of the lcd_config fields for TV manager, this lets + * us prevent checking if the manager is LCD or TV at some places + */ + mp = &dss_data.mgr_priv_data_array[OMAP_DSS_CHANNEL_DIGIT]; + + mp->lcd_config.video_port_width = 24; + mp->lcd_config.clock_info.lck_div = 1; + mp->lcd_config.clock_info.pck_div = 1; } +/* + * A LCD manager's stallmode decides whether it is in manual or auto update. TV + * manager is always auto update, stallmode field for TV manager is false by + * default + */ static bool ovl_manual_update(struct omap_overlay *ovl) { - return ovl->manager->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(ovl->manager); + + return mp->lcd_config.stallmode; } static bool mgr_manual_update(struct omap_overlay_manager *mgr) { - return mgr->device->caps & OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + return mp->lcd_config.stallmode; } static int dss_check_settings_low(struct omap_overlay_manager *mgr, @@ -214,7 +235,7 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr, ois[ovl->id] = oi; } - return dss_mgr_check(mgr, mi, &mp->timings, ois); + return dss_mgr_check(mgr, mi, &mp->timings, &mp->lcd_config, ois); } /* @@ -537,7 +558,7 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) { struct ovl_priv_data *op = get_ovl_priv(ovl); struct omap_overlay_info *oi; - bool ilace, replication; + bool replication; struct mgr_priv_data *mp; int r; @@ -550,11 +571,9 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl) mp = get_mgr_priv(ovl->manager); - replication = dss_use_replication(ovl->manager->device, oi->color_mode); - - ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; + replication = dss_ovl_use_replication(mp->lcd_config, oi->color_mode); - r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings); + r = dispc_ovl_setup(ovl->id, oi, replication, &mp->timings); if (r) { /* * We can't do much here, as this function can be called from @@ -635,6 +654,24 @@ static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr) dispc_mgr_set_timings(mgr->id, &mp->timings); + /* lcd_config parameters */ + if (dss_mgr_is_lcd(mgr->id)) { + dispc_mgr_set_io_pad_mode(mp->lcd_config.io_pad_mode); + + dispc_mgr_enable_stallmode(mgr->id, mp->lcd_config.stallmode); + dispc_mgr_enable_fifohandcheck(mgr->id, + mp->lcd_config.fifohandcheck); + + dispc_mgr_set_clock_div(mgr->id, &mp->lcd_config.clock_info); + + dispc_mgr_set_tft_data_lines(mgr->id, + mp->lcd_config.video_port_width); + + dispc_lcd_enable_signal_polarity(mp->lcd_config.lcden_sig_polarity); + + dispc_mgr_set_lcd_type_tft(mgr->id); + } + mp->extra_info_dirty = false; if (mp->updating) mp->shadow_extra_info_dirty = true; @@ -1294,6 +1331,44 @@ void 
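
In the apply.c hunks above, manual-vs-auto update is now derived from the manager's cached lcd_config.stallmode rather than from display capabilities, and the TV manager's lcd_config is pre-initialised so no LCD/TV special-casing is needed. A tiny sketch of that decision with reduced types.

/* Sketch, not part of the patch: stallmode true means manual update. */
#include <stdbool.h>
#include <stdio.h>

struct lcd_mgr_config { bool stallmode; };
struct mgr_priv { struct lcd_mgr_config lcd_config; };

static bool mgr_manual_update(const struct mgr_priv *mp)
{
	/* TV managers keep stallmode false, so no LCD/TV branch is needed */
	return mp->lcd_config.stallmode;
}

int main(void)
{
	struct mgr_priv lcd = { { true } }, tv = { { false } };

	printf("lcd manual=%d tv manual=%d\n",
	       mgr_manual_update(&lcd), mgr_manual_update(&tv));
	return 0;
}
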
dss_mgr_set_timings(struct omap_overlay_manager *mgr, mutex_unlock(&apply_lock); } +static void dss_apply_mgr_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mp->lcd_config = *config; + mp->extra_info_dirty = true; +} + +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + unsigned long flags; + struct mgr_priv_data *mp = get_mgr_priv(mgr); + + mutex_lock(&apply_lock); + + if (mp->enabled) { + DSSERR("cannot apply lcd config for %s: manager needs to be disabled\n", + mgr->name); + goto out; + } + + spin_lock_irqsave(&data_lock, flags); + + dss_apply_mgr_lcd_config(mgr, config); + + dss_write_regs(); + dss_set_go_bits(); + + spin_unlock_irqrestore(&data_lock, flags); + + wait_pending_extra_info_updates(); + +out: + mutex_unlock(&apply_lock); +} + int dss_ovl_set_info(struct omap_overlay *ovl, struct omap_overlay_info *info) { diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c index 397d4eee11bb..5b289c5f695b 100644 --- a/drivers/video/omap2/dss/dispc.c +++ b/drivers/video/omap2/dss/dispc.c @@ -119,6 +119,97 @@ enum omap_color_component { DISPC_COLOR_COMPONENT_UV = 1 << 1, }; +enum mgr_reg_fields { + DISPC_MGR_FLD_ENABLE, + DISPC_MGR_FLD_STNTFT, + DISPC_MGR_FLD_GO, + DISPC_MGR_FLD_TFTDATALINES, + DISPC_MGR_FLD_STALLMODE, + DISPC_MGR_FLD_TCKENABLE, + DISPC_MGR_FLD_TCKSELECTION, + DISPC_MGR_FLD_CPR, + DISPC_MGR_FLD_FIFOHANDCHECK, + /* used to maintain a count of the above fields */ + DISPC_MGR_FLD_NUM, +}; + +static const struct { + const char *name; + u32 vsync_irq; + u32 framedone_irq; + u32 sync_lost_irq; + struct reg_field reg_desc[DISPC_MGR_FLD_NUM]; +} mgr_desc[] = { + [OMAP_DSS_CHANNEL_LCD] = { + .name = "LCD", + .vsync_irq = DISPC_IRQ_VSYNC, + .framedone_irq = DISPC_IRQ_FRAMEDONE, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_DIGIT] = { + .name = "DIGIT", + .vsync_irq = DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN, + .framedone_irq = 0, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST_DIGIT, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL, 1, 1 }, + [DISPC_MGR_FLD_STNTFT] = { }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL, 6, 6 }, + [DISPC_MGR_FLD_TFTDATALINES] = { }, + [DISPC_MGR_FLD_STALLMODE] = { }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG, 12, 12 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG, 13, 13 }, + [DISPC_MGR_FLD_CPR] = { }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD2] = { + .name = "LCD2", + .vsync_irq = DISPC_IRQ_VSYNC2, + .framedone_irq = DISPC_IRQ_FRAMEDONE2, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST2, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL2, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL2, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL2, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL2, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL2, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { 
DISPC_CONFIG2, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG2, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG2, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG2, 16, 16 }, + }, + }, + [OMAP_DSS_CHANNEL_LCD3] = { + .name = "LCD3", + .vsync_irq = DISPC_IRQ_VSYNC3, + .framedone_irq = DISPC_IRQ_FRAMEDONE3, + .sync_lost_irq = DISPC_IRQ_SYNC_LOST3, + .reg_desc = { + [DISPC_MGR_FLD_ENABLE] = { DISPC_CONTROL3, 0, 0 }, + [DISPC_MGR_FLD_STNTFT] = { DISPC_CONTROL3, 3, 3 }, + [DISPC_MGR_FLD_GO] = { DISPC_CONTROL3, 5, 5 }, + [DISPC_MGR_FLD_TFTDATALINES] = { DISPC_CONTROL3, 9, 8 }, + [DISPC_MGR_FLD_STALLMODE] = { DISPC_CONTROL3, 11, 11 }, + [DISPC_MGR_FLD_TCKENABLE] = { DISPC_CONFIG3, 10, 10 }, + [DISPC_MGR_FLD_TCKSELECTION] = { DISPC_CONFIG3, 11, 11 }, + [DISPC_MGR_FLD_CPR] = { DISPC_CONFIG3, 15, 15 }, + [DISPC_MGR_FLD_FIFOHANDCHECK] = { DISPC_CONFIG3, 16, 16 }, + }, + }, +}; + static void _omap_dispc_set_irqs(void); static inline void dispc_write_reg(const u16 idx, u32 val) @@ -131,6 +222,18 @@ static inline u32 dispc_read_reg(const u16 idx) return __raw_readl(dispc.base + idx); } +static u32 mgr_fld_read(enum omap_channel channel, enum mgr_reg_fields regfld) +{ + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + return REG_GET(rfld.reg, rfld.high, rfld.low); +} + +static void mgr_fld_write(enum omap_channel channel, + enum mgr_reg_fields regfld, int val) { + const struct reg_field rfld = mgr_desc[channel].reg_desc[regfld]; + REG_FLD_MOD(rfld.reg, val, rfld.high, rfld.low); +} + #define SR(reg) \ dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) #define RR(reg) \ @@ -153,6 +256,10 @@ static void dispc_save_context(void) SR(CONTROL2); SR(CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + SR(CONTROL3); + SR(CONFIG3); + } for (i = 0; i < dss_feat_get_num_mgrs(); i++) { SR(DEFAULT_COLOR(i)); @@ -266,6 +373,8 @@ static void dispc_restore_context(void) RR(GLOBAL_ALPHA); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONFIG2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONFIG3); for (i = 0; i < dss_feat_get_num_mgrs(); i++) { RR(DEFAULT_COLOR(i)); @@ -351,6 +460,8 @@ static void dispc_restore_context(void) RR(CONTROL); if (dss_has_feature(FEAT_MGR_LCD2)) RR(CONTROL2); + if (dss_has_feature(FEAT_MGR_LCD3)) + RR(CONTROL3); /* clear spurious SYNC_LOST_DIGIT interrupts */ dispc_write_reg(DISPC_IRQSTATUS, DISPC_IRQ_SYNC_LOST_DIGIT); @@ -387,101 +498,41 @@ void dispc_runtime_put(void) WARN_ON(r < 0 && r != -ENOSYS); } -static inline bool dispc_mgr_is_lcd(enum omap_channel channel) -{ - if (channel == OMAP_DSS_CHANNEL_LCD || - channel == OMAP_DSS_CHANNEL_LCD2) - return true; - else - return false; -} - u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_VSYNC; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_VSYNC2; - case OMAP_DSS_CHANNEL_DIGIT: - return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; - default: - BUG(); - return 0; - } + return mgr_desc[channel].vsync_irq; } u32 dispc_mgr_get_framedone_irq(enum omap_channel channel) { - switch (channel) { - case OMAP_DSS_CHANNEL_LCD: - return DISPC_IRQ_FRAMEDONE; - case OMAP_DSS_CHANNEL_LCD2: - return DISPC_IRQ_FRAMEDONE2; - case OMAP_DSS_CHANNEL_DIGIT: - return 0; - default: - BUG(); - return 0; - } + return mgr_desc[channel].framedone_irq; } bool dispc_mgr_go_busy(enum omap_channel channel) { - int bit; - - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - 
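
The mgr_desc[] table above turns every per-channel switch statement into a data lookup. Below is a runnable simulation of the idea: the ENABLE and GO bit positions are copied from the LCD and DIGIT entries above, but the register file is fake and reduced to a single control word.

/* Sketch, not part of the patch: table-driven register fields. */
#include <stdio.h>
#include <stdint.h>

enum fld { FLD_ENABLE, FLD_GO, FLD_NUM };
enum chan { CHAN_LCD, CHAN_DIGIT, CHAN_NUM };

struct reg_field { unsigned int reg, high, low; };

static const struct reg_field desc[CHAN_NUM][FLD_NUM] = {
	[CHAN_LCD]   = { [FLD_ENABLE] = { 0, 0, 0 }, [FLD_GO] = { 0, 5, 5 } },
	[CHAN_DIGIT] = { [FLD_ENABLE] = { 0, 1, 1 }, [FLD_GO] = { 0, 6, 6 } },
};

static uint32_t regs[1];	/* fake DISPC_CONTROL */

static uint32_t fld_read(enum chan c, enum fld f)
{
	struct reg_field r = desc[c][f];

	return (regs[r.reg] >> r.low) & ((1u << (r.high - r.low + 1)) - 1);
}

static void fld_write(enum chan c, enum fld f, uint32_t val)
{
	struct reg_field r = desc[c][f];
	uint32_t mask = ((1u << (r.high - r.low + 1)) - 1) << r.low;

	regs[r.reg] = (regs[r.reg] & ~mask) | ((val << r.low) & mask);
}

int main(void)
{
	fld_write(CHAN_DIGIT, FLD_ENABLE, 1);
	fld_write(CHAN_DIGIT, FLD_GO, 1);	/* like dispc_mgr_go() */
	printf("regs[0]=0x%08x go=%u\n", (unsigned int)regs[0],
	       (unsigned int)fld_read(CHAN_DIGIT, FLD_GO));
	return 0;
}
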
return REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - return REG_GET(DISPC_CONTROL, bit, bit) == 1; + return mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; } void dispc_mgr_go(enum omap_channel channel) { - int bit; bool enable_bit, go_bit; - if (dispc_mgr_is_lcd(channel)) - bit = 0; /* LCDENABLE */ - else - bit = 1; /* DIGITALENABLE */ - /* if the channel is not enabled, we don't need GO */ - if (channel == OMAP_DSS_CHANNEL_LCD2) - enable_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - enable_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + enable_bit = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE) == 1; if (!enable_bit) return; - if (dispc_mgr_is_lcd(channel)) - bit = 5; /* GOLCD */ - else - bit = 6; /* GODIGIT */ - - if (channel == OMAP_DSS_CHANNEL_LCD2) - go_bit = REG_GET(DISPC_CONTROL2, bit, bit) == 1; - else - go_bit = REG_GET(DISPC_CONTROL, bit, bit) == 1; + go_bit = mgr_fld_read(channel, DISPC_MGR_FLD_GO) == 1; if (go_bit) { DSSERR("GO bit not down for channel %d\n", channel); return; } - DSSDBG("GO %s\n", channel == OMAP_DSS_CHANNEL_LCD ? "LCD" : - (channel == OMAP_DSS_CHANNEL_LCD2 ? "LCD2" : "DIGIT")); + DSSDBG("GO %s\n", mgr_desc[channel].name); - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, 1, bit, bit); - else - REG_FLD_MOD(DISPC_CONTROL, 1, bit, bit); + mgr_fld_write(channel, DISPC_MGR_FLD_GO, 1); } static void dispc_ovl_write_firh_reg(enum omap_plane plane, int reg, u32 value) @@ -832,6 +883,15 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) chan = 0; chan2 = 1; break; + case OMAP_DSS_CHANNEL_LCD3: + if (dss_has_feature(FEAT_MGR_LCD3)) { + chan = 0; + chan2 = 2; + } else { + BUG(); + return; + } + break; default: BUG(); return; @@ -867,7 +927,14 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane) val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); - if (dss_has_feature(FEAT_MGR_LCD2)) { + if (dss_has_feature(FEAT_MGR_LCD3)) { + if (FLD_GET(val, 31, 30) == 0) + channel = FLD_GET(val, shift, shift); + else if (FLD_GET(val, 31, 30) == 1) + channel = OMAP_DSS_CHANNEL_LCD2; + else + channel = OMAP_DSS_CHANNEL_LCD3; + } else if (dss_has_feature(FEAT_MGR_LCD2)) { if (FLD_GET(val, 31, 30) == 0) channel = FLD_GET(val, shift, shift); else @@ -922,16 +989,10 @@ void dispc_enable_gamma_table(bool enable) static void dispc_mgr_enable_cpr(enum omap_channel channel, bool enable) { - u16 reg; - - if (channel == OMAP_DSS_CHANNEL_LCD) - reg = DISPC_CONFIG; - else if (channel == OMAP_DSS_CHANNEL_LCD2) - reg = DISPC_CONFIG2; - else + if (channel == OMAP_DSS_CHANNEL_DIGIT) return; - REG_FLD_MOD(reg, enable, 15, 15); + mgr_fld_write(channel, DISPC_MGR_FLD_CPR, enable); } static void dispc_mgr_set_cpr_coef(enum omap_channel channel, @@ -939,7 +1000,7 @@ static void dispc_mgr_set_cpr_coef(enum omap_channel channel, { u32 coef_r, coef_g, coef_b; - if (!dispc_mgr_is_lcd(channel)) + if (!dss_mgr_is_lcd(channel)) return; coef_r = FLD_VAL(coefs->rr, 31, 22) | FLD_VAL(coefs->rg, 20, 11) | @@ -1798,7 +1859,7 @@ static int check_horiz_timing_omap3(enum omap_channel channel, nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width; pclk = dispc_mgr_pclk_rate(channel); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) lclk = dispc_mgr_lclk_rate(channel); else lclk = dispc_fclk_rate(); @@ -2086,8 +2147,7 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane, } int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings 
*mgr_timings) + bool replication, const struct omap_video_timings *mgr_timings) { struct omap_overlay *ovl = omap_dss_get_overlay(plane); bool five_taps = true; @@ -2103,6 +2163,7 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, u16 out_width, out_height; enum omap_channel channel; int x_predecim = 1, y_predecim = 1; + bool ilace = mgr_timings->interlace; channel = dispc_ovl_get_channel_out(plane); @@ -2254,14 +2315,9 @@ static void dispc_disable_isr(void *data, u32 mask) static void _enable_lcd_out(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) { - REG_FLD_MOD(DISPC_CONTROL2, enable ? 1 : 0, 0, 0); - /* flush posted write */ - dispc_read_reg(DISPC_CONTROL2); - } else { - REG_FLD_MOD(DISPC_CONTROL, enable ? 1 : 0, 0, 0); - dispc_read_reg(DISPC_CONTROL); - } + mgr_fld_write(channel, DISPC_MGR_FLD_ENABLE, enable); + /* flush posted write */ + mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) @@ -2274,12 +2330,9 @@ static void dispc_mgr_enable_lcd_out(enum omap_channel channel, bool enable) /* When we disable LCD output, we need to wait until frame is done. * Otherwise the DSS is still working, and turning off the clocks * prevents DSS from going to OFF mode */ - is_on = channel == OMAP_DSS_CHANNEL_LCD2 ? - REG_GET(DISPC_CONTROL2, 0, 0) : - REG_GET(DISPC_CONTROL, 0, 0); + is_on = mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); - irq = channel == OMAP_DSS_CHANNEL_LCD2 ? DISPC_IRQ_FRAMEDONE2 : - DISPC_IRQ_FRAMEDONE; + irq = mgr_desc[channel].framedone_irq; if (!enable && is_on) { init_completion(&frame_done_completion); @@ -2384,21 +2437,12 @@ static void dispc_mgr_enable_digit_out(bool enable) bool dispc_mgr_is_enabled(enum omap_channel channel) { - if (channel == OMAP_DSS_CHANNEL_LCD) - return !!REG_GET(DISPC_CONTROL, 0, 0); - else if (channel == OMAP_DSS_CHANNEL_DIGIT) - return !!REG_GET(DISPC_CONTROL, 1, 1); - else if (channel == OMAP_DSS_CHANNEL_LCD2) - return !!REG_GET(DISPC_CONTROL2, 0, 0); - else { - BUG(); - return false; - } + return !!mgr_fld_read(channel, DISPC_MGR_FLD_ENABLE); } void dispc_mgr_enable(enum omap_channel channel, bool enable) { - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) dispc_mgr_enable_lcd_out(channel, enable); else if (channel == OMAP_DSS_CHANNEL_DIGIT) dispc_mgr_enable_digit_out(enable); @@ -2432,36 +2476,13 @@ void dispc_pck_free_enable(bool enable) void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONFIG2, enable ? 1 : 0, 16, 16); - else - REG_FLD_MOD(DISPC_CONFIG, enable ? 
1 : 0, 16, 16); + mgr_fld_write(channel, DISPC_MGR_FLD_FIFOHANDCHECK, enable); } -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type) +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel) { - int mode; - - switch (type) { - case OMAP_DSS_LCD_DISPLAY_STN: - mode = 0; - break; - - case OMAP_DSS_LCD_DISPLAY_TFT: - mode = 1; - break; - - default: - BUG(); - return; - } - - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, mode, 3, 3); - else - REG_FLD_MOD(DISPC_CONTROL, mode, 3, 3); + mgr_fld_write(channel, DISPC_MGR_FLD_STNTFT, 1); } void dispc_set_loadmode(enum omap_dss_load_mode mode) @@ -2479,24 +2500,14 @@ static void dispc_mgr_set_trans_key(enum omap_channel ch, enum omap_dss_trans_key_type type, u32 trans_key) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, type, 11, 11); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, type, 13, 13); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, type, 11, 11); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKSELECTION, type); dispc_write_reg(DISPC_TRANS_COLOR(ch), trans_key); } static void dispc_mgr_enable_trans_key(enum omap_channel ch, bool enable) { - if (ch == OMAP_DSS_CHANNEL_LCD) - REG_FLD_MOD(DISPC_CONFIG, enable, 10, 10); - else if (ch == OMAP_DSS_CHANNEL_DIGIT) - REG_FLD_MOD(DISPC_CONFIG, enable, 12, 12); - else /* OMAP_DSS_CHANNEL_LCD2 */ - REG_FLD_MOD(DISPC_CONFIG2, enable, 10, 10); + mgr_fld_write(ch, DISPC_MGR_FLD_TCKENABLE, enable); } static void dispc_mgr_enable_alpha_fixed_zorder(enum omap_channel ch, @@ -2547,10 +2558,7 @@ void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines) return; } - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, code, 9, 8); - else - REG_FLD_MOD(DISPC_CONTROL, code, 9, 8); + mgr_fld_write(channel, DISPC_MGR_FLD_TFTDATALINES, code); } void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) @@ -2584,10 +2592,7 @@ void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode) void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable) { - if (channel == OMAP_DSS_CHANNEL_LCD2) - REG_FLD_MOD(DISPC_CONTROL2, enable, 11, 11); - else - REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); + mgr_fld_write(channel, DISPC_MGR_FLD_STALLMODE, enable); } static bool _dispc_mgr_size_ok(u16 width, u16 height) @@ -2627,7 +2632,7 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res); - if (dispc_mgr_is_lcd(channel)) + if (dss_mgr_is_lcd(channel)) timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw, timings->hfp, timings->hbp, timings->vsw, timings->vfp, @@ -2637,9 +2642,16 @@ bool dispc_mgr_timings_ok(enum omap_channel channel, } static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, - int hfp, int hbp, int vsw, int vfp, int vbp) + int hfp, int hbp, int vsw, int vfp, int vbp, + enum omap_dss_signal_level vsync_level, + enum omap_dss_signal_level hsync_level, + enum omap_dss_signal_edge data_pclk_edge, + enum omap_dss_signal_level de_level, + enum omap_dss_signal_edge sync_pclk_edge) + { - u32 timing_h, timing_v; + u32 timing_h, timing_v, l; + bool onoff, rf, ipc; if (cpu_is_omap24xx() || omap_rev() < OMAP3430_REV_ES3_0) { timing_h = FLD_VAL(hsw-1, 5, 0) | FLD_VAL(hfp-1, 15, 8) | @@ -2657,6 +2669,44 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, dispc_write_reg(DISPC_TIMING_H(channel), timing_h); dispc_write_reg(DISPC_TIMING_V(channel), timing_v); + + 
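The added code below folds what used to be dispc_mgr_set_pol_freq() (removed further down in this patch) into _dispc_mgr_set_lcd_timings(): the sync levels and pixel-clock edges now travel inside omap_video_timings and are packed into DISPC_POL_FREQ together with the timings. A minimal sketch of that packing, assuming FLD_VAL() and the signal enums behave as in the omapdss headers (the helper name is illustrative, not part of the patch):

static u32 pol_freq_value(bool onoff, bool rf,
			  enum omap_dss_signal_level de_level, bool ipc,
			  enum omap_dss_signal_level hsync_level,
			  enum omap_dss_signal_level vsync_level)
{
	u32 l = 0;

	l |= FLD_VAL(onoff, 17, 17);		/* ONOFF: syncs on a dedicated edge */
	l |= FLD_VAL(rf, 16, 16);		/* RF: rising (1) or falling (0) edge */
	l |= FLD_VAL(de_level, 15, 15);		/* IEO: data-enable level */
	l |= FLD_VAL(ipc, 14, 14);		/* IPC: invert pixel clock */
	l |= FLD_VAL(hsync_level, 13, 13);	/* IHS */
	l |= FLD_VAL(vsync_level, 12, 12);	/* IVS */

	return l;
}

ipc is derived from data_pclk_edge (falling edge means invert), and onoff/rf from sync_pclk_edge, exactly as the two switch statements below do.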
switch (data_pclk_edge) { + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + ipc = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + ipc = true; + break; + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + default: + BUG(); + } + + switch (sync_pclk_edge) { + case OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES: + onoff = false; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_FALLING_EDGE: + onoff = true; + rf = false; + break; + case OMAPDSS_DRIVE_SIG_RISING_EDGE: + onoff = true; + rf = true; + break; + default: + BUG(); + }; + + l = dispc_read_reg(DISPC_POL_FREQ(channel)); + l |= FLD_VAL(onoff, 17, 17); + l |= FLD_VAL(rf, 16, 16); + l |= FLD_VAL(de_level, 15, 15); + l |= FLD_VAL(ipc, 14, 14); + l |= FLD_VAL(hsync_level, 13, 13); + l |= FLD_VAL(vsync_level, 12, 12); + dispc_write_reg(DISPC_POL_FREQ(channel), l); } /* change name to mode? */ @@ -2674,9 +2724,10 @@ void dispc_mgr_set_timings(enum omap_channel channel, return; } - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw, - t.vfp, t.vbp); + t.vfp, t.vbp, t.vsync_level, t.hsync_level, + t.data_pclk_edge, t.de_level, t.sync_pclk_edge); xtot = t.x_res + t.hfp + t.hsw + t.hbp; ytot = t.y_res + t.vfp + t.vsw + t.vbp; @@ -2687,14 +2738,13 @@ void dispc_mgr_set_timings(enum omap_channel channel, DSSDBG("pck %u\n", timings->pixel_clock); DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n", t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp); + DSSDBG("vsync_level %d hsync_level %d data_pclk_edge %d de_level %d sync_pclk_edge %d\n", + t.vsync_level, t.hsync_level, t.data_pclk_edge, + t.de_level, t.sync_pclk_edge); DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); } else { - enum dss_hdmi_venc_clk_source_select source; - - source = dss_get_hdmi_venc_clk_source(); - - if (source == DSS_VENC_TV_CLK) + if (t.interlace == true) t.y_res /= 2; } @@ -2780,7 +2830,7 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel) { unsigned long r; - if (dispc_mgr_is_lcd(channel)) { + if (dss_mgr_is_lcd(channel)) { int pcd; u32 l; @@ -2821,12 +2871,32 @@ unsigned long dispc_core_clk_rate(void) return fclk / lcd; } -void dispc_dump_clocks(struct seq_file *s) +static void dispc_dump_clocks_channel(struct seq_file *s, enum omap_channel channel) { int lcd, pcd; + enum omap_dss_clk_source lcd_clk_src; + + seq_printf(s, "- %s -\n", mgr_desc[channel].name); + + lcd_clk_src = dss_get_lcd_clk_source(channel); + + seq_printf(s, "%s clk source = %s (%s)\n", mgr_desc[channel].name, + dss_get_generic_clk_source_name(lcd_clk_src), + dss_feat_get_clk_source_name(lcd_clk_src)); + + dispc_mgr_get_lcd_divisor(channel, &lcd, &pcd); + + seq_printf(s, "lck\t\t%-16lulck div\t%u\n", + dispc_mgr_lclk_rate(channel), lcd); + seq_printf(s, "pck\t\t%-16lupck div\t%u\n", + dispc_mgr_pclk_rate(channel), pcd); +} + +void dispc_dump_clocks(struct seq_file *s) +{ + int lcd; u32 l; enum omap_dss_clk_source dispc_clk_src = dss_get_dispc_clk_source(); - enum omap_dss_clk_source lcd_clk_src; if (dispc_runtime_get()) return; @@ -2847,36 +2917,13 @@ void dispc_dump_clocks(struct seq_file *s) seq_printf(s, "lck\t\t%-16lulck div\t%u\n", (dispc_fclk_rate()/lcd), lcd); } - seq_printf(s, "- LCD1 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD); - - seq_printf(s, "lcd1_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); - - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - 
dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD), pcd); - if (dss_has_feature(FEAT_MGR_LCD2)) { - seq_printf(s, "- LCD2 -\n"); - - lcd_clk_src = dss_get_lcd_clk_source(OMAP_DSS_CHANNEL_LCD2); - seq_printf(s, "lcd2_clk source = %s (%s)\n", - dss_get_generic_clk_source_name(lcd_clk_src), - dss_feat_get_clk_source_name(lcd_clk_src)); + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD); - dispc_mgr_get_lcd_divisor(OMAP_DSS_CHANNEL_LCD2, &lcd, &pcd); - - seq_printf(s, "lck\t\t%-16lulck div\t%u\n", - dispc_mgr_lclk_rate(OMAP_DSS_CHANNEL_LCD2), lcd); - seq_printf(s, "pck\t\t%-16lupck div\t%u\n", - dispc_mgr_pclk_rate(OMAP_DSS_CHANNEL_LCD2), pcd); - } + if (dss_has_feature(FEAT_MGR_LCD2)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD2); + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc_dump_clocks_channel(s, OMAP_DSS_CHANNEL_LCD3); dispc_runtime_put(); } @@ -2929,6 +2976,12 @@ void dispc_dump_irqs(struct seq_file *s) PIS(ACBIAS_COUNT_STAT2); PIS(SYNC_LOST2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + PIS(FRAMEDONE3); + PIS(VSYNC3); + PIS(ACBIAS_COUNT_STAT3); + PIS(SYNC_LOST3); + } #undef PIS } #endif @@ -2940,6 +2993,7 @@ static void dispc_dump_regs(struct seq_file *s) [OMAP_DSS_CHANNEL_LCD] = "LCD", [OMAP_DSS_CHANNEL_DIGIT] = "TV", [OMAP_DSS_CHANNEL_LCD2] = "LCD2", + [OMAP_DSS_CHANNEL_LCD3] = "LCD3", }; const char *ovl_names[] = { [OMAP_DSS_GFX] = "GFX", @@ -2972,6 +3026,10 @@ static void dispc_dump_regs(struct seq_file *s) DUMPREG(DISPC_CONTROL2); DUMPREG(DISPC_CONFIG2); } + if (dss_has_feature(FEAT_MGR_LCD3)) { + DUMPREG(DISPC_CONTROL3); + DUMPREG(DISPC_CONFIG3); + } #undef DUMPREG @@ -3093,41 +3151,8 @@ static void dispc_dump_regs(struct seq_file *s) #undef DUMPREG } -static void _dispc_mgr_set_pol_freq(enum omap_channel channel, bool onoff, - bool rf, bool ieo, bool ipc, bool ihs, bool ivs, u8 acbi, - u8 acb) -{ - u32 l = 0; - - DSSDBG("onoff %d rf %d ieo %d ipc %d ihs %d ivs %d acbi %d acb %d\n", - onoff, rf, ieo, ipc, ihs, ivs, acbi, acb); - - l |= FLD_VAL(onoff, 17, 17); - l |= FLD_VAL(rf, 16, 16); - l |= FLD_VAL(ieo, 15, 15); - l |= FLD_VAL(ipc, 14, 14); - l |= FLD_VAL(ihs, 13, 13); - l |= FLD_VAL(ivs, 12, 12); - l |= FLD_VAL(acbi, 11, 8); - l |= FLD_VAL(acb, 7, 0); - - dispc_write_reg(DISPC_POL_FREQ(channel), l); -} - -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb) -{ - _dispc_mgr_set_pol_freq(channel, (config & OMAP_DSS_LCD_ONOFF) != 0, - (config & OMAP_DSS_LCD_RF) != 0, - (config & OMAP_DSS_LCD_IEO) != 0, - (config & OMAP_DSS_LCD_IPC) != 0, - (config & OMAP_DSS_LCD_IHS) != 0, - (config & OMAP_DSS_LCD_IVS) != 0, - acbi, acb); -} - /* with fck as input clock rate, find dispc dividers that produce req_pck */ -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo) { u16 pcd_min, pcd_max; @@ -3138,9 +3163,6 @@ void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, pcd_min = dss_feat_get_param_min(FEAT_PARAM_DSS_PCD); pcd_max = dss_feat_get_param_max(FEAT_PARAM_DSS_PCD); - if (!is_tft) - pcd_min = 3; - best_pck = 0; best_ld = 0; best_pd = 0; @@ -3192,15 +3214,13 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, return 0; } -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo) { DSSDBG("lck = %lu 
(%u)\n", cinfo->lck, cinfo->lck_div); DSSDBG("pck = %lu (%u)\n", cinfo->pck, cinfo->pck_div); dispc_mgr_set_lcd_divisor(channel, cinfo->lck_div, cinfo->pck_div); - - return 0; } int dispc_mgr_get_clock_div(enum omap_channel channel, @@ -3354,6 +3374,8 @@ static void print_irq_status(u32 status) PIS(SYNC_LOST_DIGIT); if (dss_has_feature(FEAT_MGR_LCD2)) PIS(SYNC_LOST2); + if (dss_has_feature(FEAT_MGR_LCD3)) + PIS(SYNC_LOST3); #undef PIS printk("\n"); @@ -3450,12 +3472,6 @@ static void dispc_error_worker(struct work_struct *work) DISPC_IRQ_VID3_FIFO_UNDERFLOW, }; - static const unsigned sync_lost_bits[] = { - DISPC_IRQ_SYNC_LOST, - DISPC_IRQ_SYNC_LOST_DIGIT, - DISPC_IRQ_SYNC_LOST2, - }; - spin_lock_irqsave(&dispc.irq_lock, flags); errors = dispc.error_irqs; dispc.error_irqs = 0; @@ -3484,7 +3500,7 @@ static void dispc_error_worker(struct work_struct *work) unsigned bit; mgr = omap_dss_get_overlay_manager(i); - bit = sync_lost_bits[i]; + bit = mgr_desc[i].sync_lost_irq; if (bit & errors) { struct omap_dss_device *dssdev = mgr->device; @@ -3603,6 +3619,8 @@ static void _omap_dispc_initialize_irq(void) dispc.irq_error_mask = DISPC_IRQ_MASK_ERROR; if (dss_has_feature(FEAT_MGR_LCD2)) dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST2; + if (dss_has_feature(FEAT_MGR_LCD3)) + dispc.irq_error_mask |= DISPC_IRQ_SYNC_LOST3; if (dss_feat_get_num_ovls() > 3) dispc.irq_error_mask |= DISPC_IRQ_VID3_FIFO_UNDERFLOW; diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h index f278080e1063..92d8a9be86fc 100644 --- a/drivers/video/omap2/dss/dispc.h +++ b/drivers/video/omap2/dss/dispc.h @@ -36,6 +36,8 @@ #define DISPC_CONTROL2 0x0238 #define DISPC_CONFIG2 0x0620 #define DISPC_DIVISOR 0x0804 +#define DISPC_CONTROL3 0x0848 +#define DISPC_CONFIG3 0x084C /* DISPC overlay registers */ #define DISPC_OVL_BA0(n) (DISPC_OVL_BASE(n) + \ @@ -118,6 +120,8 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel) return 0x0050; case OMAP_DSS_CHANNEL_LCD2: return 0x03AC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0814; default: BUG(); return 0; @@ -133,6 +137,8 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel) return 0x0058; case OMAP_DSS_CHANNEL_LCD2: return 0x03B0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0818; default: BUG(); return 0; @@ -149,6 +155,8 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0400; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0840; default: BUG(); return 0; @@ -165,6 +173,8 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0404; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0844; default: BUG(); return 0; @@ -181,6 +191,8 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x0408; + case OMAP_DSS_CHANNEL_LCD3: + return 0x083C; default: BUG(); return 0; @@ -197,6 +209,8 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x040C; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0838; default: BUG(); return 0; @@ -213,6 +227,8 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel) return 0x0078; case OMAP_DSS_CHANNEL_LCD2: return 0x03CC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0834; default: BUG(); return 0; @@ -229,6 +245,8 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C0; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0828; default: BUG(); return 0; @@ -245,6 +263,8 @@ 
static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x082C; default: BUG(); return 0; @@ -261,6 +281,8 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03C8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0830; default: BUG(); return 0; @@ -277,6 +299,8 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03BC; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0824; default: BUG(); return 0; @@ -293,6 +317,8 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B8; + case OMAP_DSS_CHANNEL_LCD3: + return 0x0820; default: BUG(); return 0; @@ -309,6 +335,8 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel) return 0; case OMAP_DSS_CHANNEL_LCD2: return 0x03B4; + case OMAP_DSS_CHANNEL_LCD3: + return 0x081C; default: BUG(); return 0; diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c index 249010630370..5bd957e85505 100644 --- a/drivers/video/omap2/dss/display.c +++ b/drivers/video/omap2/dss/display.c @@ -116,7 +116,7 @@ static ssize_t display_timings_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct omap_dss_device *dssdev = to_dss_device(dev); - struct omap_video_timings t; + struct omap_video_timings t = dssdev->panel.timings; int r, found; if (!dssdev->driver->set_timings || !dssdev->driver->check_timings) @@ -316,44 +316,6 @@ void omapdss_default_get_timings(struct omap_dss_device *dssdev, } EXPORT_SYMBOL(omapdss_default_get_timings); -/* Checks if replication logic should be used. Only use for active matrix, - * when overlay is in RGB12U or RGB16 mode, and LCD interface is - * 18bpp or 24bpp */ -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode) -{ - int bpp; - - if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) - return false; - - if (dssdev->type == OMAP_DISPLAY_TYPE_DPI && - (dssdev->panel.config & OMAP_DSS_LCD_TFT) == 0) - return false; - - switch (dssdev->type) { - case OMAP_DISPLAY_TYPE_DPI: - bpp = dssdev->phy.dpi.data_lines; - break; - case OMAP_DISPLAY_TYPE_HDMI: - case OMAP_DISPLAY_TYPE_VENC: - case OMAP_DISPLAY_TYPE_SDI: - bpp = 24; - break; - case OMAP_DISPLAY_TYPE_DBI: - bpp = dssdev->ctrl.pixel_size; - break; - case OMAP_DISPLAY_TYPE_DSI: - bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); - break; - default: - BUG(); - return false; - } - - return bpp > 16; -} - void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev) { diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c index 8c2056c9537b..3266be23fc0d 100644 --- a/drivers/video/omap2/dss/dpi.c +++ b/drivers/video/omap2/dss/dpi.c @@ -38,6 +38,8 @@ static struct { struct regulator *vdds_dsi_reg; struct platform_device *dsidev; + + struct dss_lcd_mgr_config mgr_config; } dpi; static struct platform_device *dpi_get_dsidev(enum omap_dss_clk_source clk) @@ -64,7 +66,7 @@ static bool dpi_use_dsi_pll(struct omap_dss_device *dssdev) return false; } -static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -72,8 +74,8 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; 
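A recurring pattern in the dpi/dsi/rfbi/sdi changes in this patch: instead of programming the dispc manager piecemeal (clock divisors, stall mode, TFT data lines, io-pad mode), each interface driver now collects its settings in a struct dss_lcd_mgr_config and hands them over in one call. A sketch of the shape, modeled on dpi_config_lcd_manager() further down and the struct added to dss.h later in this diff (the function name is illustrative):

static void example_config_lcd_manager(struct omap_dss_device *dssdev,
				       const struct dispc_clock_info *cinfo)
{
	struct dss_lcd_mgr_config mgr_config;

	mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS;
	mgr_config.stallmode = false;
	mgr_config.fifohandcheck = false;
	mgr_config.clock_info = *cinfo;	/* computed earlier, applied here */
	mgr_config.video_port_width = dssdev->phy.dpi.data_lines;
	mgr_config.lcden_sig_polarity = 0;

	/* one call replaces the scattered dispc_mgr_* setters */
	dss_mgr_set_lcd_config(dssdev->manager, &mgr_config);
}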
int r; - r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, pck_req, - &dsi_cinfo, &dispc_cinfo); + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, pck_req, &dsi_cinfo, + &dispc_cinfo); if (r) return r; @@ -83,11 +85,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); - return r; - } + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; *lck_div = dispc_cinfo.lck_div; @@ -96,7 +94,7 @@ static int dpi_set_dsi_clk(struct omap_dss_device *dssdev, bool is_tft, return 0; } -static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, +static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, unsigned long pck_req, unsigned long *fck, int *lck_div, int *pck_div) { @@ -104,7 +102,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, struct dispc_clock_info dispc_cinfo; int r; - r = dss_calc_clock_div(is_tft, pck_req, &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(pck_req, &dss_cinfo, &dispc_cinfo); if (r) return r; @@ -112,9 +110,7 @@ static int dpi_set_dispc_clk(struct omap_dss_device *dssdev, bool is_tft, if (r) return r; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - return r; + dpi.mgr_config.clock_info = dispc_cinfo; *fck = dss_cinfo.fck; *lck_div = dispc_cinfo.lck_div; @@ -129,20 +125,14 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) int lck_div = 0, pck_div = 0; unsigned long fck = 0; unsigned long pck; - bool is_tft; int r = 0; - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); - - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) - r = dpi_set_dsi_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dsi_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); else - r = dpi_set_dispc_clk(dssdev, is_tft, t->pixel_clock * 1000, - &fck, &lck_div, &pck_div); + r = dpi_set_dispc_clk(dssdev, t->pixel_clock * 1000, &fck, + &lck_div, &pck_div); if (r) return r; @@ -161,19 +151,18 @@ static int dpi_set_mode(struct omap_dss_device *dssdev) return 0; } -static void dpi_basic_init(struct omap_dss_device *dssdev) +static void dpi_config_lcd_manager(struct omap_dss_device *dssdev) { - bool is_tft; + dpi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + dpi.mgr_config.stallmode = false; + dpi.mgr_config.fifohandcheck = false; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; + dpi.mgr_config.video_port_width = dssdev->phy.dpi.data_lines; - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + dpi.mgr_config.lcden_sig_polarity = 0; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, is_tft ? 
- OMAP_DSS_LCD_DISPLAY_TFT : OMAP_DSS_LCD_DISPLAY_STN); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dssdev->phy.dpi.data_lines); + dss_mgr_set_lcd_config(dssdev->manager, &dpi.mgr_config); } int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) @@ -206,8 +195,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - dpi_basic_init(dssdev); - if (dpi_use_dsi_pll(dssdev)) { r = dsi_runtime_get(dpi.dsidev); if (r) @@ -222,6 +209,8 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_mode; + dpi_config_lcd_manager(dssdev); + mdelay(2); r = dss_mgr_enable(dssdev->manager); @@ -292,7 +281,6 @@ EXPORT_SYMBOL(dpi_set_timings); int dpi_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { - bool is_tft; int r; int lck_div, pck_div; unsigned long fck; @@ -305,11 +293,9 @@ int dpi_check_timings(struct omap_dss_device *dssdev, if (timings->pixel_clock == 0) return -EINVAL; - is_tft = (dssdev->panel.config & OMAP_DSS_LCD_TFT) != 0; - if (dpi_use_dsi_pll(dssdev)) { struct dsi_clock_info dsi_cinfo; - r = dsi_pll_calc_clock_div_pck(dpi.dsidev, is_tft, + r = dsi_pll_calc_clock_div_pck(dpi.dsidev, timings->pixel_clock * 1000, &dsi_cinfo, &dispc_cinfo); @@ -319,7 +305,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev, fck = dsi_cinfo.dsi_pll_hsdiv_dispc_clk; } else { struct dss_clock_info dss_cinfo; - r = dss_calc_clock_div(is_tft, timings->pixel_clock * 1000, + r = dss_calc_clock_div(timings->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c index 14ce8cc079e3..b07e8864f82f 100644 --- a/drivers/video/omap2/dss/dsi.c +++ b/drivers/video/omap2/dss/dsi.c @@ -331,6 +331,8 @@ struct dsi_data { unsigned num_lanes_used; unsigned scp_clk_refcount; + + struct dss_lcd_mgr_config mgr_config; }; struct dsi_packet_sent_handler_data { @@ -1085,9 +1087,9 @@ static inline void dsi_enable_pll_clock(struct platform_device *dsidev, struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); if (enable) - clk_enable(dsi->sys_clk); + clk_prepare_enable(dsi->sys_clk); else - clk_disable(dsi->sys_clk); + clk_disable_unprepare(dsi->sys_clk); if (enable && dsi->pll_locked) { if (wait_for_bit_change(dsidev, DSI_PLL_STATUS, 1, 1) != 1) @@ -1316,7 +1318,7 @@ static int dsi_calc_clock_rates(struct platform_device *dsidev, return 0; } -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -1335,8 +1337,8 @@ int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, dsi->cache_cinfo.clkin == dss_sys_clk) { DSSDBG("DSI clock info found from cache\n"); *dsi_cinfo = dsi->cache_cinfo; - dispc_find_clk_divs(is_tft, req_pck, - dsi_cinfo->dsi_pll_hsdiv_dispc_clk, dispc_cinfo); + dispc_find_clk_divs(req_pck, dsi_cinfo->dsi_pll_hsdiv_dispc_clk, + dispc_cinfo); return 0; } @@ -1402,7 +1404,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, + dispc_find_clk_divs(req_pck, cur.dsi_pll_hsdiv_dispc_clk, &cur_dispc); @@ -3631,17 +3633,14 @@ static void dsi_config_vp_num_line_buffers(struct omap_dss_device *dssdev) static void dsi_config_vp_sync_events(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - int de_pol = dssdev->panel.dsi_vm_data.vp_de_pol; - int hsync_pol = 
dssdev->panel.dsi_vm_data.vp_hsync_pol; - int vsync_pol = dssdev->panel.dsi_vm_data.vp_vsync_pol; bool vsync_end = dssdev->panel.dsi_vm_data.vp_vsync_end; bool hsync_end = dssdev->panel.dsi_vm_data.vp_hsync_end; u32 r; r = dsi_read_reg(dsidev, DSI_CTRL); - r = FLD_MOD(r, de_pol, 9, 9); /* VP_DE_POL */ - r = FLD_MOD(r, hsync_pol, 10, 10); /* VP_HSYNC_POL */ - r = FLD_MOD(r, vsync_pol, 11, 11); /* VP_VSYNC_POL */ + r = FLD_MOD(r, 1, 9, 9); /* VP_DE_POL */ + r = FLD_MOD(r, 1, 10, 10); /* VP_HSYNC_POL */ + r = FLD_MOD(r, 1, 11, 11); /* VP_VSYNC_POL */ r = FLD_MOD(r, 1, 15, 15); /* VP_VSYNC_START */ r = FLD_MOD(r, vsync_end, 16, 16); /* VP_VSYNC_END */ r = FLD_MOD(r, 1, 17, 17); /* VP_HSYNC_START */ @@ -4340,52 +4339,101 @@ EXPORT_SYMBOL(omap_dsi_update); /* Display funcs */ +static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) +{ + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct dispc_clock_info dispc_cinfo; + int r; + unsigned long long fck; + + fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); + + dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; + dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; + + r = dispc_calc_clock_rates(fck, &dispc_cinfo); + if (r) { + DSSERR("Failed to calc dispc clocks\n"); + return r; + } + + dsi->mgr_config.clock_info = dispc_cinfo; + + return 0; +} + static int dsi_display_init_dispc(struct omap_dss_device *dssdev) { + struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); + struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); + struct omap_video_timings timings; int r; + u32 irq = 0; if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u16 dw, dh; - u32 irq; - struct omap_video_timings timings = { - .hsw = 1, - .hfp = 1, - .hbp = 1, - .vsw = 1, - .vfp = 0, - .vbp = 0, - }; dssdev->driver->get_resolution(dssdev, &dw, &dh); + timings.x_res = dw; timings.y_res = dh; + timings.hsw = 1; + timings.hfp = 1; + timings.hbp = 1; + timings.vsw = 1; + timings.vfp = 0; + timings.vbp = 0; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); r = omap_dispc_register_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); if (r) { DSSERR("can't get FRAMEDONE irq\n"); - return r; + goto err; } - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); - - dss_mgr_set_timings(dssdev->manager, &timings); + dsi->mgr_config.stallmode = true; + dsi->mgr_config.fifohandcheck = true; } else { - dispc_mgr_enable_stallmode(dssdev->manager->id, false); - dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); + timings = dssdev->panel.timings; - dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings); + dsi->mgr_config.stallmode = false; + dsi->mgr_config.fifohandcheck = false; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - dispc_mgr_set_tft_data_lines(dssdev->manager->id, - dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt)); + /* + * override interlace, logic level and edge related parameters in + * omap_video_timings with default values + */ + timings.interlace = false; + timings.hsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.vsync_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings.de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + + dss_mgr_set_timings(dssdev->manager, &timings); + + r = dsi_configure_dispc_clocks(dssdev); + if (r) + goto err1; + + dsi->mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + dsi->mgr_config.video_port_width = + dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt); + dsi->mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &dsi->mgr_config); + return 0; +err1: + if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) + omap_dispc_unregister_isr(dsi_framedone_irq_callback, + (void *) dssdev, irq); +err: + return r; } static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) @@ -4393,8 +4441,7 @@ static void dsi_display_uninit_dispc(struct omap_dss_device *dssdev) if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_CMD_MODE) { u32 irq; - irq = dssdev->manager->id == OMAP_DSS_CHANNEL_LCD ? 
- DISPC_IRQ_FRAMEDONE : DISPC_IRQ_FRAMEDONE2; + irq = dispc_mgr_get_framedone_irq(dssdev->manager->id); omap_dispc_unregister_isr(dsi_framedone_irq_callback, (void *) dssdev, irq); @@ -4426,33 +4473,6 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev) return 0; } -static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev) -{ - struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); - struct dispc_clock_info dispc_cinfo; - int r; - unsigned long long fck; - - fck = dsi_get_pll_hsdiv_dispc_rate(dsidev); - - dispc_cinfo.lck_div = dssdev->clocks.dispc.channel.lck_div; - dispc_cinfo.pck_div = dssdev->clocks.dispc.channel.pck_div; - - r = dispc_calc_clock_rates(fck, &dispc_cinfo); - if (r) { - DSSERR("Failed to calc dispc clocks\n"); - return r; - } - - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) { - DSSERR("Failed to set dispc clocks\n"); - return r; - } - - return 0; -} - static int dsi_display_init_dsi(struct omap_dss_device *dssdev) { struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); @@ -4474,10 +4494,6 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev) DSSDBG("PLL OK\n"); - r = dsi_configure_dispc_clocks(dssdev); - if (r) - goto err2; - r = dsi_cio_init(dssdev); if (r) goto err2; diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c index d2b57197b292..04b4586113e3 100644 --- a/drivers/video/omap2/dss/dss.c +++ b/drivers/video/omap2/dss/dss.c @@ -388,7 +388,8 @@ void dss_select_lcd_clk_source(enum omap_channel channel, dsi_wait_pll_hsdiv_dispc_active(dsidev); break; case OMAP_DSS_CLK_SRC_DSI2_PLL_HSDIV_DISPC: - BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2); + BUG_ON(channel != OMAP_DSS_CHANNEL_LCD2 && + channel != OMAP_DSS_CHANNEL_LCD3); b = 1; dsidev = dsi_get_dsidev_from_id(1); dsi_wait_pll_hsdiv_dispc_active(dsidev); @@ -398,10 +399,12 @@ void dss_select_lcd_clk_source(enum omap_channel channel, return; } - pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; + pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 12 : 19); REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* LCDx_CLK_SWITCH */ - ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 1 : 2); dss.lcd_clk_source[ix] = clk_src; } @@ -418,7 +421,8 @@ enum omap_dss_clk_source dss_get_dsi_clk_source(int dsi_module) enum omap_dss_clk_source dss_get_lcd_clk_source(enum omap_channel channel) { if (dss_has_feature(FEAT_LCD_CLK_SRC)) { - int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 1; + int ix = channel == OMAP_DSS_CHANNEL_LCD ? 0 : + (channel == OMAP_DSS_CHANNEL_LCD2 ? 
1 : 2); return dss.lcd_clk_source[ix]; } else { /* LCD_CLK source is the same as DISPC_FCLK source for @@ -502,8 +506,7 @@ unsigned long dss_get_dpll4_rate(void) return 0; } -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, +int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo) { unsigned long prate; @@ -551,7 +554,7 @@ retry: fck = clk_get_rate(dss.dss_clk); fck_div = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); match = 1; best_dss.fck = fck; @@ -581,7 +584,7 @@ retry: match = 1; - dispc_find_clk_divs(is_tft, req_pck, fck, &cur_dispc); + dispc_find_clk_divs(req_pck, fck, &cur_dispc); if (abs(cur_dispc.pck - req_pck) < abs(best_dispc.pck - req_pck)) { diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h index dd1092ceaeef..f67afe76f217 100644 --- a/drivers/video/omap2/dss/dss.h +++ b/drivers/video/omap2/dss/dss.h @@ -152,6 +152,25 @@ struct dsi_clock_info { u16 lp_clk_div; }; +struct reg_field { + u16 reg; + u8 high; + u8 low; +}; + +struct dss_lcd_mgr_config { + enum dss_io_pad_mode io_pad_mode; + + bool stallmode; + bool fifohandcheck; + + struct dispc_clock_info clock_info; + + int video_port_width; + + int lcden_sig_polarity; +}; + struct seq_file; struct platform_device; @@ -188,6 +207,8 @@ int dss_mgr_set_device(struct omap_overlay_manager *mgr, int dss_mgr_unset_device(struct omap_overlay_manager *mgr); void dss_mgr_set_timings(struct omap_overlay_manager *mgr, struct omap_video_timings *timings); +void dss_mgr_set_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config); const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr); bool dss_ovl_is_enabled(struct omap_overlay *ovl); @@ -210,8 +231,6 @@ void dss_init_device(struct platform_device *pdev, struct omap_dss_device *dssdev); void dss_uninit_device(struct platform_device *pdev, struct omap_dss_device *dssdev); -bool dss_use_replication(struct omap_dss_device *dssdev, - enum omap_color_mode mode); /* manager */ int dss_init_overlay_managers(struct platform_device *pdev); @@ -223,8 +242,18 @@ int dss_mgr_check_timings(struct omap_overlay_manager *mgr, int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *config, struct omap_overlay_info **overlay_infos); +static inline bool dss_mgr_is_lcd(enum omap_channel id) +{ + if (id == OMAP_DSS_CHANNEL_LCD || id == OMAP_DSS_CHANNEL_LCD2 || + id == OMAP_DSS_CHANNEL_LCD3) + return true; + else + return false; +} + /* overlay */ void dss_init_overlays(struct platform_device *pdev); void dss_uninit_overlays(struct platform_device *pdev); @@ -234,6 +263,8 @@ int dss_ovl_simple_check(struct omap_overlay *ovl, const struct omap_overlay_info *info); int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, const struct omap_video_timings *mgr_timings); +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode); /* DSS */ int dss_init_platform_driver(void) __init; @@ -268,8 +299,7 @@ unsigned long dss_get_dpll4_rate(void); int dss_calc_clock_rates(struct dss_clock_info *cinfo); int dss_set_clock_div(struct dss_clock_info *cinfo); int dss_get_clock_div(struct dss_clock_info *cinfo); -int dss_calc_clock_div(bool is_tft, unsigned long req_pck, - struct dss_clock_info *dss_cinfo, 
+int dss_calc_clock_div(unsigned long req_pck, struct dss_clock_info *dss_cinfo, struct dispc_clock_info *dispc_cinfo); /* SDI */ @@ -296,7 +326,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); unsigned long dsi_get_pll_hsdiv_dispc_rate(struct platform_device *dsidev); int dsi_pll_set_clock_div(struct platform_device *dsidev, struct dsi_clock_info *cinfo); -int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, bool is_tft, +int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, unsigned long req_pck, struct dsi_clock_info *cinfo, struct dispc_clock_info *dispc_cinfo); int dsi_pll_init(struct platform_device *dsidev, bool enable_hsclk, @@ -330,7 +360,7 @@ static inline int dsi_pll_set_clock_div(struct platform_device *dsidev, return -ENODEV; } static inline int dsi_pll_calc_clock_div_pck(struct platform_device *dsidev, - bool is_tft, unsigned long req_pck, + unsigned long req_pck, struct dsi_clock_info *dsi_cinfo, struct dispc_clock_info *dispc_cinfo) { @@ -387,7 +417,7 @@ void dispc_set_loadmode(enum omap_dss_load_mode mode); bool dispc_mgr_timings_ok(enum omap_channel channel, const struct omap_video_timings *timings); unsigned long dispc_fclk_rate(void); -void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, +void dispc_find_clk_divs(unsigned long req_pck, unsigned long fck, struct dispc_clock_info *cinfo); int dispc_calc_clock_rates(unsigned long dispc_fclk_rate, struct dispc_clock_info *cinfo); @@ -398,8 +428,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, u32 *fifo_low, u32 *fifo_high, bool use_fifomerge, bool manual_update); int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, - bool ilace, bool replication, - const struct omap_video_timings *mgr_timings); + bool replication, const struct omap_video_timings *mgr_timings); int dispc_ovl_enable(enum omap_plane plane, bool enable); void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel); @@ -415,16 +444,13 @@ bool dispc_mgr_is_channel_enabled(enum omap_channel channel); void dispc_mgr_set_io_pad_mode(enum dss_io_pad_mode mode); void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable); void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); -void dispc_mgr_set_lcd_display_type(enum omap_channel channel, - enum omap_lcd_display_type type); +void dispc_mgr_set_lcd_type_tft(enum omap_channel channel); void dispc_mgr_set_timings(enum omap_channel channel, struct omap_video_timings *timings); -void dispc_mgr_set_pol_freq(enum omap_channel channel, - enum omap_panel_config config, u8 acbi, u8 acb); unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); unsigned long dispc_core_clk_rate(void); -int dispc_mgr_set_clock_div(enum omap_channel channel, +void dispc_mgr_set_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); int dispc_mgr_get_clock_div(enum omap_channel channel, struct dispc_clock_info *cinfo); diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h index bdf469f080e7..996ffcbfed58 100644 --- a/drivers/video/omap2/dss/dss_features.h +++ b/drivers/video/omap2/dss/dss_features.h @@ -24,9 +24,9 @@ #include "ti_hdmi.h" #endif -#define MAX_DSS_MANAGERS 3 +#define MAX_DSS_MANAGERS 4 #define MAX_DSS_OVERLAYS 4 -#define MAX_DSS_LCD_MANAGERS 2 +#define MAX_DSS_LCD_MANAGERS 3 #define MAX_NUM_DSI 2 /* DSS has feature id */ @@ -36,6 +36,7 @@ enum dss_feat_id { 
FEAT_PCKFREEENABLE, FEAT_FUNCGATED, FEAT_MGR_LCD2, + FEAT_MGR_LCD3, FEAT_LINEBUFFERSPLIT, FEAT_ROWREPEATENABLE, FEAT_RESIZECONF, diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c index 26a2430a7028..060216fdc578 100644 --- a/drivers/video/omap2/dss/hdmi.c +++ b/drivers/video/omap2/dss/hdmi.c @@ -78,43 +78,214 @@ static struct { */ static const struct hdmi_config cea_timings[] = { -{ {640, 480, 25200, 96, 16, 48, 2, 10, 33, 0, 0, 0}, {1, HDMI_HDMI} }, -{ {720, 480, 27027, 62, 16, 60, 6, 9, 30, 0, 0, 0}, {2, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {4, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 88, 148, 5, 2, 15, 1, 1, 1}, {5, HDMI_HDMI} }, -{ {1440, 240, 27027, 124, 38, 114, 3, 4, 15, 0, 0, 1}, {6, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 88, 148, 5, 4, 36, 1, 1, 0}, {16, HDMI_HDMI} }, -{ {720, 576, 27000, 64, 12, 68, 5, 5, 39, 0, 0, 0}, {17, HDMI_HDMI} }, -{ {1280, 720, 74250, 40, 440, 220, 5, 5, 20, 1, 1, 0}, {19, HDMI_HDMI} }, -{ {1920, 540, 74250, 44, 528, 148, 5, 2, 15, 1, 1, 1}, {20, HDMI_HDMI} }, -{ {1440, 288, 27000, 126, 24, 138, 3, 2, 19, 0, 0, 1}, {21, HDMI_HDMI} }, -{ {1440, 576, 54000, 128, 24, 136, 5, 5, 39, 0, 0, 0}, {29, HDMI_HDMI} }, -{ {1920, 1080, 148500, 44, 528, 148, 5, 4, 36, 1, 1, 0}, {31, HDMI_HDMI} }, -{ {1920, 1080, 74250, 44, 638, 148, 5, 4, 36, 1, 1, 0}, {32, HDMI_HDMI} }, -{ {2880, 480, 108108, 248, 64, 240, 6, 9, 30, 0, 0, 0}, {35, HDMI_HDMI} }, -{ {2880, 576, 108000, 256, 48, 272, 5, 5, 39, 0, 0, 0}, {37, HDMI_HDMI} }, + { + { 640, 480, 25200, 96, 16, 48, 2, 10, 33, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 1, HDMI_HDMI }, + }, + { + { 720, 480, 27027, 62, 16, 60, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 2, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 4, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 88, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 5, HDMI_HDMI }, + }, + { + { 1440, 240, 27027, 124, 38, 114, 3, 4, 15, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 6, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 88, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 16, HDMI_HDMI }, + }, + { + { 720, 576, 27000, 64, 12, 68, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 17, HDMI_HDMI }, + }, + { + { 1280, 720, 74250, 40, 440, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 19, HDMI_HDMI }, + }, + { + { 1920, 540, 74250, 44, 528, 148, 5, 2, 15, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + true, }, + { 20, HDMI_HDMI }, + }, + { + { 1440, 288, 27000, 126, 24, 138, 3, 2, 19, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + true, }, + { 21, HDMI_HDMI }, + }, + { + { 1440, 576, 54000, 128, 24, 136, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 29, HDMI_HDMI }, + }, + { + { 1920, 1080, 148500, 44, 528, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 31, HDMI_HDMI }, + }, + { + { 1920, 1080, 74250, 44, 638, 148, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 32, HDMI_HDMI }, + }, + { + { 2880, 480, 108108, 248, 64, 240, 6, 9, 30, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 35, HDMI_HDMI }, + }, + { + { 2880, 576, 108000, 256, 48, 272, 5, 5, 39, + OMAPDSS_SIG_ACTIVE_LOW, 
OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 37, HDMI_HDMI }, + }, }; + static const struct hdmi_config vesa_timings[] = { /* VESA From Here */ -{ {640, 480, 25175, 96, 16, 48, 2 , 11, 31, 0, 0, 0}, {4, HDMI_DVI} }, -{ {800, 600, 40000, 128, 40, 88, 4 , 1, 23, 1, 1, 0}, {9, HDMI_DVI} }, -{ {848, 480, 33750, 112, 16, 112, 8 , 6, 23, 1, 1, 0}, {0xE, HDMI_DVI} }, -{ {1280, 768, 79500, 128, 64, 192, 7 , 3, 20, 1, 0, 0}, {0x17, HDMI_DVI} }, -{ {1280, 800, 83500, 128, 72, 200, 6 , 3, 22, 1, 0, 0}, {0x1C, HDMI_DVI} }, -{ {1360, 768, 85500, 112, 64, 256, 6 , 3, 18, 1, 1, 0}, {0x27, HDMI_DVI} }, -{ {1280, 960, 108000, 112, 96, 312, 3 , 1, 36, 1, 1, 0}, {0x20, HDMI_DVI} }, -{ {1280, 1024, 108000, 112, 48, 248, 3 , 1, 38, 1, 1, 0}, {0x23, HDMI_DVI} }, -{ {1024, 768, 65000, 136, 24, 160, 6, 3, 29, 0, 0, 0}, {0x10, HDMI_DVI} }, -{ {1400, 1050, 121750, 144, 88, 232, 4, 3, 32, 1, 0, 0}, {0x2A, HDMI_DVI} }, -{ {1440, 900, 106500, 152, 80, 232, 6, 3, 25, 1, 0, 0}, {0x2F, HDMI_DVI} }, -{ {1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, 1, 0, 0}, {0x3A, HDMI_DVI} }, -{ {1366, 768, 85500, 143, 70, 213, 3, 3, 24, 1, 1, 0}, {0x51, HDMI_DVI} }, -{ {1920, 1080, 148500, 44, 148, 80, 5, 4, 36, 1, 1, 0}, {0x52, HDMI_DVI} }, -{ {1280, 768, 68250, 32, 48, 80, 7, 3, 12, 0, 1, 0}, {0x16, HDMI_DVI} }, -{ {1400, 1050, 101000, 32, 48, 80, 4, 3, 23, 0, 1, 0}, {0x29, HDMI_DVI} }, -{ {1680, 1050, 119000, 32, 48, 80, 6, 3, 21, 0, 1, 0}, {0x39, HDMI_DVI} }, -{ {1280, 800, 79500, 32, 48, 80, 6, 3, 14, 0, 1, 0}, {0x1B, HDMI_DVI} }, -{ {1280, 720, 74250, 40, 110, 220, 5, 5, 20, 1, 1, 0}, {0x55, HDMI_DVI} } + { + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 4, HDMI_DVI }, + }, + { + { 800, 600, 40000, 128, 40, 88, 4, 1, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 9, HDMI_DVI }, + }, + { + { 848, 480, 33750, 112, 16, 112, 8, 6, 23, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0xE, HDMI_DVI }, + }, + { + { 1280, 768, 79500, 128, 64, 192, 7, 3, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x17, HDMI_DVI }, + }, + { + { 1280, 800, 83500, 128, 72, 200, 6, 3, 22, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x1C, HDMI_DVI }, + }, + { + { 1360, 768, 85500, 112, 64, 256, 6, 3, 18, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x27, HDMI_DVI }, + }, + { + { 1280, 960, 108000, 112, 96, 312, 3, 1, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x20, HDMI_DVI }, + }, + { + { 1280, 1024, 108000, 112, 48, 248, 3, 1, 38, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x23, HDMI_DVI }, + }, + { + { 1024, 768, 65000, 136, 24, 160, 6, 3, 29, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x10, HDMI_DVI }, + }, + { + { 1400, 1050, 121750, 144, 88, 232, 4, 3, 32, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2A, HDMI_DVI }, + }, + { + { 1440, 900, 106500, 152, 80, 232, 6, 3, 25, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x2F, HDMI_DVI }, + }, + { + { 1680, 1050, 146250, 176 , 104, 280, 6, 3, 30, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_LOW, + false, }, + { 0x3A, HDMI_DVI }, + }, + { + { 1366, 768, 85500, 143, 70, 213, 3, 3, 24, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x51, HDMI_DVI }, + }, + { + { 1920, 1080, 148500, 44, 148, 80, 5, 4, 36, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x52, HDMI_DVI }, + }, + { + 
{ 1280, 768, 68250, 32, 48, 80, 7, 3, 12, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x16, HDMI_DVI }, + }, + { + { 1400, 1050, 101000, 32, 48, 80, 4, 3, 23, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x29, HDMI_DVI }, + }, + { + { 1680, 1050, 119000, 32, 48, 80, 6, 3, 21, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x39, HDMI_DVI }, + }, + { + { 1280, 800, 79500, 32, 48, 80, 6, 3, 14, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x1B, HDMI_DVI }, + }, + { + { 1280, 720, 74250, 40, 110, 220, 5, 5, 20, + OMAPDSS_SIG_ACTIVE_HIGH, OMAPDSS_SIG_ACTIVE_HIGH, + false, }, + { 0x55, HDMI_DVI }, + }, }; static int hdmi_runtime_get(void) @@ -179,7 +350,7 @@ static const struct hdmi_config *hdmi_get_timings(void) } static bool hdmi_timings_compare(struct omap_video_timings *timing1, - const struct hdmi_video_timings *timing2) + const struct omap_video_timings *timing2) { int timing1_vsync, timing1_hsync, timing2_vsync, timing2_hsync; @@ -758,6 +929,7 @@ static int __init omapdss_hdmihw_probe(struct platform_device *pdev) hdmi.ip_data.core_av_offset = HDMI_CORE_AV; hdmi.ip_data.pll_offset = HDMI_PLLCTRL; hdmi.ip_data.phy_offset = HDMI_PHY; + mutex_init(&hdmi.ip_data.lock); hdmi_panel_init(); @@ -785,7 +957,7 @@ static int __exit omapdss_hdmihw_remove(struct platform_device *pdev) static int hdmi_runtime_suspend(struct device *dev) { - clk_disable(hdmi.sys_clk); + clk_disable_unprepare(hdmi.sys_clk); dispc_runtime_put(); @@ -800,7 +972,7 @@ static int hdmi_runtime_resume(struct device *dev) if (r < 0) return r; - clk_enable(hdmi.sys_clk); + clk_prepare_enable(hdmi.sys_clk); return 0; } diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c index 1179e3c4b1c7..e10844faadf9 100644 --- a/drivers/video/omap2/dss/hdmi_panel.c +++ b/drivers/video/omap2/dss/hdmi_panel.c @@ -43,10 +43,11 @@ static int hdmi_panel_probe(struct omap_dss_device *dssdev) { DSSDBG("ENTER hdmi_panel_probe\n"); - dssdev->panel.config = OMAP_DSS_LCD_TFT | - OMAP_DSS_LCD_IVS | OMAP_DSS_LCD_IHS; - - dssdev->panel.timings = (struct omap_video_timings){640, 480, 25175, 96, 16, 48, 2 , 11, 31}; + dssdev->panel.timings = (struct omap_video_timings) + { 640, 480, 25175, 96, 16, 48, 2, 11, 31, + OMAPDSS_SIG_ACTIVE_LOW, OMAPDSS_SIG_ACTIVE_LOW, + false, + }; DSSDBG("hdmi_panel_probe x_res= %d y_res = %d\n", dssdev->panel.timings.x_res, diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c index 0cbcde4c688a..53710fadc82d 100644 --- a/drivers/video/omap2/dss/manager.c +++ b/drivers/video/omap2/dss/manager.c @@ -500,16 +500,12 @@ static int dss_mgr_wait_for_vsync(struct omap_overlay_manager *mgr) if (r) return r; - if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) { + if (mgr->device->type == OMAP_DISPLAY_TYPE_VENC) irq = DISPC_IRQ_EVSYNC_ODD; - } else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) { + else if (mgr->device->type == OMAP_DISPLAY_TYPE_HDMI) irq = DISPC_IRQ_EVSYNC_EVEN; - } else { - if (mgr->id == OMAP_DSS_CHANNEL_LCD) - irq = DISPC_IRQ_VSYNC; - else - irq = DISPC_IRQ_VSYNC2; - } + else + irq = dispc_mgr_get_vsync_irq(mgr->id); r = omap_dispc_wait_for_irq_interruptible_timeout(irq, timeout); @@ -545,6 +541,10 @@ int dss_init_overlay_managers(struct platform_device *pdev) mgr->name = "lcd2"; mgr->id = OMAP_DSS_CHANNEL_LCD2; break; + case 3: + mgr->name = "lcd3"; + mgr->id = OMAP_DSS_CHANNEL_LCD3; + break; } mgr->set_device = &dss_mgr_set_device; @@ -665,9 +665,40 @@ int 
dss_mgr_check_timings(struct omap_overlay_manager *mgr, return 0; } +static int dss_mgr_check_lcd_config(struct omap_overlay_manager *mgr, + const struct dss_lcd_mgr_config *config) +{ + struct dispc_clock_info cinfo = config->clock_info; + int dl = config->video_port_width; + bool stallmode = config->stallmode; + bool fifohandcheck = config->fifohandcheck; + + if (cinfo.lck_div < 1 || cinfo.lck_div > 255) + return -EINVAL; + + if (cinfo.pck_div < 1 || cinfo.pck_div > 255) + return -EINVAL; + + if (dl != 12 && dl != 16 && dl != 18 && dl != 24) + return -EINVAL; + + /* fifohandcheck should be used only with stallmode */ + if (stallmode == false && fifohandcheck == true) + return -EINVAL; + + /* + * io pad mode can be only checked by using dssdev connected to the + * manager. Ignore checking these for now, add checks when manager + * is capable of holding information related to the connected interface + */ + + return 0; +} + int dss_mgr_check(struct omap_overlay_manager *mgr, struct omap_overlay_manager_info *info, const struct omap_video_timings *mgr_timings, + const struct dss_lcd_mgr_config *lcd_config, struct omap_overlay_info **overlay_infos) { struct omap_overlay *ovl; @@ -683,6 +714,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr, if (r) return r; + r = dss_mgr_check_lcd_config(mgr, lcd_config); + if (r) + return r; + list_for_each_entry(ovl, &mgr->overlays, list) { struct omap_overlay_info *oi; int r; diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c index b0ba60f88dd2..952c6fad9a81 100644 --- a/drivers/video/omap2/dss/overlay.c +++ b/drivers/video/omap2/dss/overlay.c @@ -528,14 +528,24 @@ void dss_recheck_connections(struct omap_dss_device *dssdev, bool force) struct omap_overlay_manager *lcd_mgr; struct omap_overlay_manager *tv_mgr; struct omap_overlay_manager *lcd2_mgr = NULL; + struct omap_overlay_manager *lcd3_mgr = NULL; struct omap_overlay_manager *mgr = NULL; - lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD); - tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_TV); + lcd_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD); + tv_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_DIGIT); + if (dss_has_feature(FEAT_MGR_LCD3)) + lcd3_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD3); if (dss_has_feature(FEAT_MGR_LCD2)) - lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_OVL_MGR_LCD2); - - if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { + lcd2_mgr = omap_dss_get_overlay_manager(OMAP_DSS_CHANNEL_LCD2); + + if (dssdev->channel == OMAP_DSS_CHANNEL_LCD3) { + if (!lcd3_mgr->device || force) { + if (lcd3_mgr->device) + lcd3_mgr->unset_device(lcd3_mgr); + lcd3_mgr->set_device(lcd3_mgr, dssdev); + mgr = lcd3_mgr; + } + } else if (dssdev->channel == OMAP_DSS_CHANNEL_LCD2) { if (!lcd2_mgr->device || force) { if (lcd2_mgr->device) lcd2_mgr->unset_device(lcd2_mgr); @@ -677,3 +687,16 @@ int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info, return 0; } + +/* + * Checks if replication logic should be used. 
Only use when overlay is in + * RGB12U or RGB16 mode, and video port width interface is 18bpp or 24bpp + */ +bool dss_ovl_use_replication(struct dss_lcd_mgr_config config, + enum omap_color_mode mode) +{ + if (mode != OMAP_DSS_COLOR_RGB12U && mode != OMAP_DSS_COLOR_RGB16) + return false; + + return config.video_port_width > 16; +} diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c index 7985fa12b9b4..7c087424b634 100644 --- a/drivers/video/omap2/dss/rfbi.c +++ b/drivers/video/omap2/dss/rfbi.c @@ -300,10 +300,11 @@ void omap_rfbi_write_pixels(const void __iomem *buf, int scr_width, } EXPORT_SYMBOL(omap_rfbi_write_pixels); -static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, +static int rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, u16 height, void (*callback)(void *data), void *data) { u32 l; + int r; struct omap_video_timings timings = { .hsw = 1, .hfp = 1, @@ -322,7 +323,9 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, dss_mgr_set_timings(dssdev->manager, &timings); - dispc_mgr_enable(dssdev->manager->id, true); + r = dss_mgr_enable(dssdev->manager); + if (r) + return r; rfbi.framedone_callback = callback; rfbi.framedone_callback_data = data; @@ -335,6 +338,8 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width, l = FLD_MOD(l, 1, 4, 4); /* ITE */ rfbi_write_reg(RFBI_CONTROL, l); + + return 0; } static void framedone_callback(void *data, u32 mask) @@ -814,8 +819,11 @@ int omap_rfbi_update(struct omap_dss_device *dssdev, u16 x, u16 y, u16 w, u16 h, void (*callback)(void *), void *data) { - rfbi_transfer_area(dssdev, w, h, callback, data); - return 0; + int r; + + r = rfbi_transfer_area(dssdev, w, h, callback, data); + + return r; } EXPORT_SYMBOL(omap_rfbi_update); @@ -859,6 +867,22 @@ static void rfbi_dump_regs(struct seq_file *s) #undef DUMPREG } +static void rfbi_config_lcd_manager(struct omap_dss_device *dssdev) +{ + struct dss_lcd_mgr_config mgr_config; + + mgr_config.io_pad_mode = DSS_IO_PAD_MODE_RFBI; + + mgr_config.stallmode = true; + /* Do we need fifohandcheck for RFBI? 
*/ + mgr_config.fifohandcheck = false; + + mgr_config.video_port_width = dssdev->ctrl.pixel_size; + mgr_config.lcden_sig_polarity = 0; + + dss_mgr_set_lcd_config(dssdev->manager, &mgr_config); +} + int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) { int r; @@ -885,13 +909,7 @@ int omapdss_rfbi_display_enable(struct omap_dss_device *dssdev) goto err1; } - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); - - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_RFBI); - dispc_mgr_enable_stallmode(dssdev->manager->id, true); - - dispc_mgr_set_tft_data_lines(dssdev->manager->id, dssdev->ctrl.pixel_size); + rfbi_config_lcd_manager(dssdev); rfbi_configure(dssdev->phy.rfbi.channel, dssdev->ctrl.pixel_size, diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c index 3a43dc2a9b46..5d31699fbd3c 100644 --- a/drivers/video/omap2/dss/sdi.c +++ b/drivers/video/omap2/dss/sdi.c @@ -32,19 +32,21 @@ static struct { bool update_enabled; struct regulator *vdds_sdi_reg; -} sdi; -static void sdi_basic_init(struct omap_dss_device *dssdev) + struct dss_lcd_mgr_config mgr_config; +} sdi; +static void sdi_config_lcd_manager(struct omap_dss_device *dssdev) { - dispc_mgr_set_io_pad_mode(DSS_IO_PAD_MODE_BYPASS); - dispc_mgr_enable_stallmode(dssdev->manager->id, false); + sdi.mgr_config.io_pad_mode = DSS_IO_PAD_MODE_BYPASS; + + sdi.mgr_config.stallmode = false; + sdi.mgr_config.fifohandcheck = false; - dispc_mgr_set_lcd_display_type(dssdev->manager->id, - OMAP_DSS_LCD_DISPLAY_TFT); + sdi.mgr_config.video_port_width = 24; + sdi.mgr_config.lcden_sig_polarity = 1; - dispc_mgr_set_tft_data_lines(dssdev->manager->id, 24); - dispc_lcd_enable_signal_polarity(1); + dss_mgr_set_lcd_config(dssdev->manager, &sdi.mgr_config); } int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) @@ -52,8 +54,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) struct omap_video_timings *t = &dssdev->panel.timings; struct dss_clock_info dss_cinfo; struct dispc_clock_info dispc_cinfo; - u16 lck_div, pck_div; - unsigned long fck; unsigned long pck; int r; @@ -76,24 +76,17 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_get_dispc; - sdi_basic_init(dssdev); - /* 15.5.9.1.2 */ - dssdev->panel.config |= OMAP_DSS_LCD_RF | OMAP_DSS_LCD_ONOFF; - - dispc_mgr_set_pol_freq(dssdev->manager->id, dssdev->panel.config, - dssdev->panel.acbi, dssdev->panel.acb); + dssdev->panel.timings.data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + dssdev->panel.timings.sync_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; - r = dss_calc_clock_div(1, t->pixel_clock * 1000, - &dss_cinfo, &dispc_cinfo); + r = dss_calc_clock_div(t->pixel_clock * 1000, &dss_cinfo, &dispc_cinfo); if (r) goto err_calc_clock_div; - fck = dss_cinfo.fck; - lck_div = dispc_cinfo.lck_div; - pck_div = dispc_cinfo.pck_div; + sdi.mgr_config.clock_info = dispc_cinfo; - pck = fck / lck_div / pck_div / 1000; + pck = dss_cinfo.fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div / 1000; if (pck != t->pixel_clock) { DSSWARN("Could not find exact pixel clock. 
Requested %d kHz, " @@ -110,9 +103,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) if (r) goto err_set_dss_clock_div; - r = dispc_mgr_set_clock_div(dssdev->manager->id, &dispc_cinfo); - if (r) - goto err_set_dispc_clock_div; + sdi_config_lcd_manager(dssdev); dss_sdi_init(dssdev->phy.sdi.datapairs); r = dss_sdi_enable(); @@ -129,7 +120,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev) err_mgr_enable: dss_sdi_disable(); err_sdi_enable: -err_set_dispc_clock_div: err_set_dss_clock_div: err_calc_clock_div: dispc_runtime_put(); diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h index e734cb444bc7..b046c208cb97 100644 --- a/drivers/video/omap2/dss/ti_hdmi.h +++ b/drivers/video/omap2/dss/ti_hdmi.h @@ -42,30 +42,13 @@ enum hdmi_clk_refsel { HDMI_REFSEL_SYSCLK = 3 }; -/* HDMI timing structure */ -struct hdmi_video_timings { - u16 x_res; - u16 y_res; - /* Unit: KHz */ - u32 pixel_clock; - u16 hsw; - u16 hfp; - u16 hbp; - u16 vsw; - u16 vfp; - u16 vbp; - bool vsync_pol; - bool hsync_pol; - bool interlace; -}; - struct hdmi_cm { int code; int mode; }; struct hdmi_config { - struct hdmi_video_timings timings; + struct omap_video_timings timings; struct hdmi_cm cm; }; @@ -177,7 +160,7 @@ struct hdmi_ip_data { /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */ int hpd_gpio; - bool phy_tx_enabled; + struct mutex lock; }; int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data); void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c index 4dae1b291079..c23b85a20cdc 100644 --- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c +++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c @@ -157,6 +157,10 @@ static int hdmi_pll_init(struct hdmi_ip_data *ip_data) /* PHY_PWR_CMD */ static int hdmi_set_phy_pwr(struct hdmi_ip_data *ip_data, enum hdmi_phy_pwr val) { + /* Return if already the state */ + if (REG_GET(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, 5, 4) == val) + return 0; + /* Command for power control of HDMI PHY */ REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_PWR_CTRL, val, 7, 6); @@ -231,21 +235,13 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data) static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) { - unsigned long flags; bool hpd; int r; - /* this should be in ti_hdmi_4xxx_ip private data */ - static DEFINE_SPINLOCK(phy_tx_lock); - spin_lock_irqsave(&phy_tx_lock, flags); + mutex_lock(&ip_data->lock); hpd = gpio_get_value(ip_data->hpd_gpio); - if (hpd == ip_data->phy_tx_enabled) { - spin_unlock_irqrestore(&phy_tx_lock, flags); - return 0; - } - if (hpd) r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON); else @@ -257,9 +253,8 @@ static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data) goto err; } - ip_data->phy_tx_enabled = hpd; err: - spin_unlock_irqrestore(&phy_tx_lock, flags); + mutex_unlock(&ip_data->lock); return r; } @@ -327,7 +322,6 @@ void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data) free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data); hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); - ip_data->phy_tx_enabled = false; } static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data) @@ -747,11 +741,15 @@ static void hdmi_wp_video_config_format(struct hdmi_ip_data *ip_data, static void hdmi_wp_video_config_interface(struct hdmi_ip_data *ip_data) { u32 r; + bool vsync_pol, hsync_pol; pr_debug("Enter hdmi_wp_video_config_interface\n"); + vsync_pol = ip_data->cfg.timings.vsync_level == 
OMAPDSS_SIG_ACTIVE_HIGH; + hsync_pol = ip_data->cfg.timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH; + r = hdmi_read_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG); - r = FLD_MOD(r, ip_data->cfg.timings.vsync_pol, 7, 7); - r = FLD_MOD(r, ip_data->cfg.timings.hsync_pol, 6, 6); + r = FLD_MOD(r, vsync_pol, 7, 7); + r = FLD_MOD(r, hsync_pol, 6, 6); r = FLD_MOD(r, ip_data->cfg.timings.interlace, 3, 3); r = FLD_MOD(r, 1, 1, 0); /* HDMI_TIMING_MASTER_24BIT */ hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, r); diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c index 3907c8b6ecbc..3a220877461a 100644 --- a/drivers/video/omap2/dss/venc.c +++ b/drivers/video/omap2/dss/venc.c @@ -272,6 +272,8 @@ const struct omap_video_timings omap_dss_pal_timings = { .vsw = 5, .vfp = 5, .vbp = 41, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_pal_timings); @@ -285,6 +287,8 @@ const struct omap_video_timings omap_dss_ntsc_timings = { .vsw = 6, .vfp = 6, .vbp = 31, + + .interlace = true, }; EXPORT_SYMBOL(omap_dss_ntsc_timings); @@ -930,7 +934,7 @@ static int __exit omap_venchw_remove(struct platform_device *pdev) static int venc_runtime_suspend(struct device *dev) { if (venc.tv_dac_clk) - clk_disable(venc.tv_dac_clk); + clk_disable_unprepare(venc.tv_dac_clk); dispc_runtime_put(); @@ -946,7 +950,7 @@ static int venc_runtime_resume(struct device *dev) return r; if (venc.tv_dac_clk) - clk_enable(venc.tv_dac_clk); + clk_prepare_enable(venc.tv_dac_clk); return 0; } diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c index 3450ea0966c9..08ec1a7103f2 100644 --- a/drivers/video/omap2/omapfb/omapfb-main.c +++ b/drivers/video/omap2/omapfb/omapfb-main.c @@ -733,6 +733,12 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = timings.vfp; var->hsync_len = timings.hsw; var->vsync_len = timings.vsw; + var->sync |= timings.hsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_HOR_HIGH_ACT : 0; + var->sync |= timings.vsync_level == OMAPDSS_SIG_ACTIVE_HIGH ? + FB_SYNC_VERT_HIGH_ACT : 0; + var->vmode = timings.interlace ? + FB_VMODE_INTERLACED : FB_VMODE_NONINTERLACED; } else { var->pixclock = 0; var->left_margin = 0; @@ -741,12 +747,10 @@ int check_fb_var(struct fb_info *fbi, struct fb_var_screeninfo *var) var->lower_margin = 0; var->hsync_len = 0; var->vsync_len = 0; + var->sync = 0; + var->vmode = FB_VMODE_NONINTERLACED; } - /* TODO: get these from panel->config */ - var->vmode = FB_VMODE_NONINTERLACED; - var->sync = 0; - return 0; } @@ -1993,6 +1997,7 @@ static int omapfb_create_framebuffers(struct omapfb2_device *fbdev) } static int omapfb_mode_to_timings(const char *mode_str, + struct omap_dss_device *display, struct omap_video_timings *timings, u8 *bpp) { struct fb_info *fbi; @@ -2046,6 +2051,14 @@ static int omapfb_mode_to_timings(const char *mode_str, goto err; } + if (display->driver->get_timings) { + display->driver->get_timings(display, timings); + } else { + timings->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + timings->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + timings->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + timings->pixel_clock = PICOS2KHZ(var->pixclock); timings->hbp = var->left_margin; timings->hfp = var->right_margin; @@ -2055,6 +2068,13 @@ static int omapfb_mode_to_timings(const char *mode_str, timings->vsw = var->vsync_len; timings->x_res = var->xres; timings->y_res = var->yres; + timings->hsync_level = var->sync & FB_SYNC_HOR_HIGH_ACT ? 
+ OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->vsync_level = var->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + timings->interlace = var->vmode & FB_VMODE_INTERLACED; switch (var->bits_per_pixel) { case 16: @@ -2085,7 +2105,7 @@ static int omapfb_set_def_mode(struct omapfb2_device *fbdev, struct omap_video_timings timings, temp_timings; struct omapfb_display_data *d; - r = omapfb_mode_to_timings(mode_str, &timings, &bpp); + r = omapfb_mode_to_timings(mode_str, display, &timings, &bpp); if (r) return r; @@ -2178,8 +2198,17 @@ static int omapfb_parse_def_modes(struct omapfb2_device *fbdev) } static void fb_videomode_to_omap_timings(struct fb_videomode *m, + struct omap_dss_device *display, struct omap_video_timings *t) { + if (display->driver->get_timings) { + display->driver->get_timings(display, t); + } else { + t->data_pclk_edge = OMAPDSS_DRIVE_SIG_RISING_EDGE; + t->de_level = OMAPDSS_SIG_ACTIVE_HIGH; + t->sync_pclk_edge = OMAPDSS_DRIVE_SIG_OPPOSITE_EDGES; + } + t->x_res = m->xres; t->y_res = m->yres; t->pixel_clock = PICOS2KHZ(m->pixclock); @@ -2189,6 +2218,13 @@ static void fb_videomode_to_omap_timings(struct fb_videomode *m, t->vsw = m->vsync_len; t->vfp = m->lower_margin; t->vbp = m->upper_margin; + t->hsync_level = m->sync & FB_SYNC_HOR_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->vsync_level = m->sync & FB_SYNC_VERT_HIGH_ACT ? + OMAPDSS_SIG_ACTIVE_HIGH : + OMAPDSS_SIG_ACTIVE_LOW; + t->interlace = m->vmode & FB_VMODE_INTERLACED; } static int omapfb_find_best_mode(struct omap_dss_device *display, @@ -2231,7 +2267,7 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, if (m->xres == 2880 || m->xres == 1440) continue; - fb_videomode_to_omap_timings(m, &t); + fb_videomode_to_omap_timings(m, display, &t); r = display->driver->check_timings(display, &t); if (r == 0 && best_xres < m->xres) { @@ -2245,7 +2281,8 @@ static int omapfb_find_best_mode(struct omap_dss_device *display, goto err2; } - fb_videomode_to_omap_timings(&specs->modedb[best_idx], timings); + fb_videomode_to_omap_timings(&specs->modedb[best_idx], display, + timings); r = 0; diff --git a/drivers/video/s3fb.c b/drivers/video/s3fb.c index 2c80246b18b8..1d007366b917 100644 --- a/drivers/video/s3fb.c +++ b/drivers/video/s3fb.c @@ -84,7 +84,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", "S3 Virge/VX", "S3 Virge/DX", "S3 Virge/GX", "S3 Virge/GX2", "S3 Virge/GX2+", "", "S3 Trio3D/1X", "S3 Trio3D/2X", "S3 Trio3D/2X", - "S3 Trio3D"}; + "S3 Trio3D", "S3 Virge/MX"}; #define CHIP_UNKNOWN 0x00 #define CHIP_732_TRIO32 0x01 @@ -105,6 +105,7 @@ static const char * const s3_names[] = {"S3 Unknown", "S3 Trio32", "S3 Trio64", #define CHIP_362_TRIO3D_2X 0x11 #define CHIP_368_TRIO3D_2X 0x12 #define CHIP_365_TRIO3D 0x13 +#define CHIP_260_VIRGE_MX 0x14 #define CHIP_XXX_TRIO 0x80 #define CHIP_XXX_TRIO64V2_DXGX 0x81 @@ -280,7 +281,8 @@ static int __devinit s3fb_setup_ddc_bus(struct fb_info *info) */ /* vga_wseq(par->state.vgabase, 0x08, 0x06); - not needed, already unlocked */ if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) svga_wseq_mask(par->state.vgabase, 0x0d, 0x01, 0x03); else svga_wseq_mask(par->state.vgabase, 0x0d, 0x00, 0x03); @@ -487,7 +489,8 @@ static void s3_set_pixclock(struct fb_info *info, u32 pixclock) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X 
|| - par->chip == CHIP_368_TRIO3D_2X) { + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) { vga_wseq(par->state.vgabase, 0x12, (n - 2) | ((r & 3) << 6)); /* n and two bits of r */ vga_wseq(par->state.vgabase, 0x29, r >> 2); /* remaining highest bit of r */ } else @@ -690,7 +693,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) { + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) { vga_wcrt(par->state.vgabase, 0x54, 0x18); /* M parameter */ vga_wcrt(par->state.vgabase, 0x60, 0xff); /* N parameter */ vga_wcrt(par->state.vgabase, 0x61, 0xff); /* L parameter */ @@ -739,7 +743,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_368_TRIO3D_2X || par->chip == CHIP_365_TRIO3D || par->chip == CHIP_375_VIRGE_DX || - par->chip == CHIP_385_VIRGE_GX) { + par->chip == CHIP_385_VIRGE_GX || + par->chip == CHIP_260_VIRGE_MX) { dbytes = info->var.xres * ((bpp+7)/8); vga_wcrt(par->state.vgabase, 0x91, (dbytes + 7) / 8); vga_wcrt(par->state.vgabase, 0x90, (((dbytes + 7) / 8) >> 8) | 0x80); @@ -751,7 +756,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) vga_wcrt(par->state.vgabase, 0x34, 0x00); else /* enable Data Transfer Position Control (DTPC) */ vga_wcrt(par->state.vgabase, 0x34, 0x10); @@ -807,7 +813,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip == CHIP_359_VIRGE_GX2P || par->chip == CHIP_360_TRIO3D_1X || par->chip == CHIP_362_TRIO3D_2X || - par->chip == CHIP_368_TRIO3D_2X) + par->chip == CHIP_368_TRIO3D_2X || + par->chip == CHIP_260_VIRGE_MX) svga_wcrt_mask(par->state.vgabase, 0x67, 0x00, 0xF0); else { svga_wcrt_mask(par->state.vgabase, 0x67, 0x10, 0xF0); @@ -837,7 +844,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -864,7 +872,8 @@ static int s3fb_set_par(struct fb_info *info) par->chip != CHIP_359_VIRGE_GX2P && par->chip != CHIP_360_TRIO3D_1X && par->chip != CHIP_362_TRIO3D_2X && - par->chip != CHIP_368_TRIO3D_2X) + par->chip != CHIP_368_TRIO3D_2X && + par->chip != CHIP_260_VIRGE_MX) hmul = 2; } break; @@ -1208,7 +1217,8 @@ static int __devinit s3_pci_probe(struct pci_dev *dev, const struct pci_device_i break; } } else if (par->chip == CHIP_357_VIRGE_GX2 || - par->chip == CHIP_359_VIRGE_GX2P) { + par->chip == CHIP_359_VIRGE_GX2P || + par->chip == CHIP_260_VIRGE_MX) { switch ((regval & 0xC0) >> 6) { case 1: /* 4MB */ info->screen_size = 4 << 20; @@ -1515,6 +1525,7 @@ static struct pci_device_id s3_devices[] __devinitdata = { {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A12), .driver_data = CHIP_359_VIRGE_GX2P}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8A13), .driver_data = CHIP_36X_TRIO3D_1X_2X}, {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8904), .driver_data = CHIP_365_TRIO3D}, + {PCI_DEVICE(PCI_VENDOR_ID_S3, 0x8C01), .driver_data = CHIP_260_VIRGE_MX}, {0, 0, 0, 0, 0, 0, 0} }; diff --git a/drivers/video/sh_mipi_dsi.c b/drivers/video/sh_mipi_dsi.c index 4c6b84488561..3951fdae5f68 100644 --- a/drivers/video/sh_mipi_dsi.c +++ b/drivers/video/sh_mipi_dsi.c @@ -127,8 +127,7 @@ static 
void sh_mipi_shutdown(struct platform_device *pdev) sh_mipi_dsi_enable(mipi, false); } -static int __init sh_mipi_setup(struct sh_mipi *mipi, - struct sh_mipi_dsi_info *pdata) +static int sh_mipi_setup(struct sh_mipi *mipi, struct sh_mipi_dsi_info *pdata) { void __iomem *base = mipi->base; struct sh_mobile_lcdc_chan_cfg *ch = pdata->lcd_chan; @@ -551,7 +550,7 @@ efindslot: return ret; } -static int __exit sh_mipi_remove(struct platform_device *pdev) +static int __devexit sh_mipi_remove(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *res2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); @@ -592,7 +591,7 @@ static int __exit sh_mipi_remove(struct platform_device *pdev) } static struct platform_driver sh_mipi_driver = { - .remove = __exit_p(sh_mipi_remove), + .remove = __devexit_p(sh_mipi_remove), .shutdown = sh_mipi_shutdown, .driver = { .name = "sh-mipi-dsi", diff --git a/drivers/video/sh_mobile_lcdcfb.c b/drivers/video/sh_mobile_lcdcfb.c index e672698bd820..699487c287b2 100644 --- a/drivers/video/sh_mobile_lcdcfb.c +++ b/drivers/video/sh_mobile_lcdcfb.c @@ -12,6 +12,7 @@ #include <linux/backlight.h> #include <linux/clk.h> #include <linux/console.h> +#include <linux/ctype.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/gpio.h> @@ -32,12 +33,176 @@ #include "sh_mobile_lcdcfb.h" +/* ---------------------------------------------------------------------------- + * Overlay register definitions + */ + +#define LDBCR 0xb00 +#define LDBCR_UPC(n) (1 << ((n) + 16)) +#define LDBCR_UPF(n) (1 << ((n) + 8)) +#define LDBCR_UPD(n) (1 << ((n) + 0)) +#define LDBnBSIFR(n) (0xb20 + (n) * 0x20 + 0x00) +#define LDBBSIFR_EN (1 << 31) +#define LDBBSIFR_VS (1 << 29) +#define LDBBSIFR_BRSEL (1 << 28) +#define LDBBSIFR_MX (1 << 27) +#define LDBBSIFR_MY (1 << 26) +#define LDBBSIFR_CV3 (3 << 24) +#define LDBBSIFR_CV2 (2 << 24) +#define LDBBSIFR_CV1 (1 << 24) +#define LDBBSIFR_CV0 (0 << 24) +#define LDBBSIFR_CV_MASK (3 << 24) +#define LDBBSIFR_LAY_MASK (0xff << 16) +#define LDBBSIFR_LAY_SHIFT 16 +#define LDBBSIFR_ROP3_MASK (0xff << 16) +#define LDBBSIFR_ROP3_SHIFT 16 +#define LDBBSIFR_AL_PL8 (3 << 14) +#define LDBBSIFR_AL_PL1 (2 << 14) +#define LDBBSIFR_AL_PK (1 << 14) +#define LDBBSIFR_AL_1 (0 << 14) +#define LDBBSIFR_AL_MASK (3 << 14) +#define LDBBSIFR_SWPL (1 << 10) +#define LDBBSIFR_SWPW (1 << 9) +#define LDBBSIFR_SWPB (1 << 8) +#define LDBBSIFR_RY (1 << 7) +#define LDBBSIFR_CHRR_420 (2 << 0) +#define LDBBSIFR_CHRR_422 (1 << 0) +#define LDBBSIFR_CHRR_444 (0 << 0) +#define LDBBSIFR_RPKF_ARGB32 (0x00 << 0) +#define LDBBSIFR_RPKF_RGB16 (0x03 << 0) +#define LDBBSIFR_RPKF_RGB24 (0x0b << 0) +#define LDBBSIFR_RPKF_MASK (0x1f << 0) +#define LDBnBSSZR(n) (0xb20 + (n) * 0x20 + 0x04) +#define LDBBSSZR_BVSS_MASK (0xfff << 16) +#define LDBBSSZR_BVSS_SHIFT 16 +#define LDBBSSZR_BHSS_MASK (0xfff << 0) +#define LDBBSSZR_BHSS_SHIFT 0 +#define LDBnBLOCR(n) (0xb20 + (n) * 0x20 + 0x08) +#define LDBBLOCR_CVLC_MASK (0xfff << 16) +#define LDBBLOCR_CVLC_SHIFT 16 +#define LDBBLOCR_CHLC_MASK (0xfff << 0) +#define LDBBLOCR_CHLC_SHIFT 0 +#define LDBnBSMWR(n) (0xb20 + (n) * 0x20 + 0x0c) +#define LDBBSMWR_BSMWA_MASK (0xffff << 16) +#define LDBBSMWR_BSMWA_SHIFT 16 +#define LDBBSMWR_BSMW_MASK (0xffff << 0) +#define LDBBSMWR_BSMW_SHIFT 0 +#define LDBnBSAYR(n) (0xb20 + (n) * 0x20 + 0x10) +#define LDBBSAYR_FG1A_MASK (0xff << 24) +#define LDBBSAYR_FG1A_SHIFT 24 +#define LDBBSAYR_FG1R_MASK (0xff << 16) +#define LDBBSAYR_FG1R_SHIFT 16 +#define 
LDBBSAYR_FG1G_MASK (0xff << 8) +#define LDBBSAYR_FG1G_SHIFT 8 +#define LDBBSAYR_FG1B_MASK (0xff << 0) +#define LDBBSAYR_FG1B_SHIFT 0 +#define LDBnBSACR(n) (0xb20 + (n) * 0x20 + 0x14) +#define LDBBSACR_FG2A_MASK (0xff << 24) +#define LDBBSACR_FG2A_SHIFT 24 +#define LDBBSACR_FG2R_MASK (0xff << 16) +#define LDBBSACR_FG2R_SHIFT 16 +#define LDBBSACR_FG2G_MASK (0xff << 8) +#define LDBBSACR_FG2G_SHIFT 8 +#define LDBBSACR_FG2B_MASK (0xff << 0) +#define LDBBSACR_FG2B_SHIFT 0 +#define LDBnBSAAR(n) (0xb20 + (n) * 0x20 + 0x18) +#define LDBBSAAR_AP_MASK (0xff << 24) +#define LDBBSAAR_AP_SHIFT 24 +#define LDBBSAAR_R_MASK (0xff << 16) +#define LDBBSAAR_R_SHIFT 16 +#define LDBBSAAR_GY_MASK (0xff << 8) +#define LDBBSAAR_GY_SHIFT 8 +#define LDBBSAAR_B_MASK (0xff << 0) +#define LDBBSAAR_B_SHIFT 0 +#define LDBnBPPCR(n) (0xb20 + (n) * 0x20 + 0x1c) +#define LDBBPPCR_AP_MASK (0xff << 24) +#define LDBBPPCR_AP_SHIFT 24 +#define LDBBPPCR_R_MASK (0xff << 16) +#define LDBBPPCR_R_SHIFT 16 +#define LDBBPPCR_GY_MASK (0xff << 8) +#define LDBBPPCR_GY_SHIFT 8 +#define LDBBPPCR_B_MASK (0xff << 0) +#define LDBBPPCR_B_SHIFT 0 +#define LDBnBBGCL(n) (0xb10 + (n) * 0x04) +#define LDBBBGCL_BGA_MASK (0xff << 24) +#define LDBBBGCL_BGA_SHIFT 24 +#define LDBBBGCL_BGR_MASK (0xff << 16) +#define LDBBBGCL_BGR_SHIFT 16 +#define LDBBBGCL_BGG_MASK (0xff << 8) +#define LDBBBGCL_BGG_SHIFT 8 +#define LDBBBGCL_BGB_MASK (0xff << 0) +#define LDBBBGCL_BGB_SHIFT 0 + #define SIDE_B_OFFSET 0x1000 #define MIRROR_OFFSET 0x2000 #define MAX_XRES 1920 #define MAX_YRES 1080 +enum sh_mobile_lcdc_overlay_mode { + LCDC_OVERLAY_BLEND, + LCDC_OVERLAY_ROP3, +}; + +/* + * struct sh_mobile_lcdc_overlay - LCDC display overlay + * + * @channel: LCDC channel this overlay belongs to + * @cfg: Overlay configuration + * @info: Frame buffer device + * @index: Overlay index (0-3) + * @base: Overlay registers base address + * @enabled: True if the overlay is enabled + * @mode: Overlay blending mode (alpha blend or ROP3) + * @alpha: Global alpha blending value (0-255, for alpha blending mode) + * @rop3: Raster operation (for ROP3 mode) + * @fb_mem: Frame buffer virtual memory address + * @fb_size: Frame buffer size in bytes + * @dma_handle: Frame buffer DMA address + * @base_addr_y: Overlay base address (RGB or luma component) + * @base_addr_c: Overlay base address (chroma component) + * @pan_y_offset: Panning linear offset in bytes (luma component) + * @format: Current pixelf format + * @xres: Horizontal visible resolution + * @xres_virtual: Horizontal total resolution + * @yres: Vertical visible resolution + * @yres_virtual: Vertical total resolution + * @pitch: Overlay line pitch + * @pos_x: Horizontal overlay position + * @pos_y: Vertical overlay position + */ +struct sh_mobile_lcdc_overlay { + struct sh_mobile_lcdc_chan *channel; + + const struct sh_mobile_lcdc_overlay_cfg *cfg; + struct fb_info *info; + + unsigned int index; + unsigned long base; + + bool enabled; + enum sh_mobile_lcdc_overlay_mode mode; + unsigned int alpha; + unsigned int rop3; + + void *fb_mem; + unsigned long fb_size; + + dma_addr_t dma_handle; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long pan_y_offset; + + const struct sh_mobile_lcdc_format_info *format; + unsigned int xres; + unsigned int xres_virtual; + unsigned int yres; + unsigned int yres_virtual; + unsigned int pitch; + int pos_x; + int pos_y; +}; + struct sh_mobile_lcdc_priv { void __iomem *base; int irq; @@ -45,7 +210,10 @@ struct sh_mobile_lcdc_priv { struct device *dev; struct clk *dot_clk; unsigned long 
lddckr; + struct sh_mobile_lcdc_chan ch[2]; + struct sh_mobile_lcdc_overlay overlays[4]; + struct notifier_block notifier; int started; int forced_fourcc; /* 2 channel LCDC must share fourcc setting */ @@ -141,6 +309,13 @@ static unsigned long lcdc_read_chan(struct sh_mobile_lcdc_chan *chan, return ioread32(chan->lcdc->base + chan->reg_offs[reg_nr]); } +static void lcdc_write_overlay(struct sh_mobile_lcdc_overlay *ovl, + int reg, unsigned long data) +{ + iowrite32(data, ovl->channel->lcdc->base + reg); + iowrite32(data, ovl->channel->lcdc->base + reg + SIDE_B_OFFSET); +} + static void lcdc_write(struct sh_mobile_lcdc_priv *priv, unsigned long reg_offs, unsigned long data) { @@ -384,8 +559,8 @@ sh_mobile_lcdc_must_reconfigure(struct sh_mobile_lcdc_chan *ch, return true; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, - struct fb_info *info); +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info); static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, enum sh_mobile_lcdc_entity_event event, @@ -439,7 +614,7 @@ static int sh_mobile_lcdc_display_notify(struct sh_mobile_lcdc_chan *ch, fb_videomode_to_var(&var, mode); var.bits_per_pixel = info->var.bits_per_pixel; var.grayscale = info->var.grayscale; - ret = sh_mobile_check_var(&var, info); + ret = sh_mobile_lcdc_check_var(&var, info); break; } @@ -585,7 +760,7 @@ static irqreturn_t sh_mobile_lcdc_irq(int irq, void *data) return IRQ_HANDLED; } -static int sh_mobile_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) +static int sh_mobile_lcdc_wait_for_vsync(struct sh_mobile_lcdc_chan *ch) { unsigned long ldintr; int ret; @@ -685,8 +860,98 @@ static void sh_mobile_lcdc_geometry(struct sh_mobile_lcdc_chan *ch) lcdc_write_chan(ch, LDHAJR, tmp); } +static void sh_mobile_lcdc_overlay_setup(struct sh_mobile_lcdc_overlay *ovl) +{ + u32 format = 0; + + if (!ovl->enabled) { + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), 0); + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + return; + } + + ovl->base_addr_y = ovl->dma_handle; + ovl->base_addr_c = ovl->dma_handle + + ovl->xres_virtual * ovl->yres_virtual; + + switch (ovl->mode) { + case LCDC_OVERLAY_BLEND: + format = LDBBSIFR_EN | (ovl->alpha << LDBBSIFR_LAY_SHIFT); + break; + + case LCDC_OVERLAY_ROP3: + format = LDBBSIFR_EN | LDBBSIFR_BRSEL + | (ovl->rop3 << LDBBSIFR_ROP3_SHIFT); + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + case V4L2_PIX_FMT_NV21: + case V4L2_PIX_FMT_NV61: + case V4L2_PIX_FMT_NV42: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW; + break; + case V4L2_PIX_FMT_BGR24: + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV24: + format |= LDBBSIFR_SWPL | LDBBSIFR_SWPW | LDBBSIFR_SWPB; + break; + case V4L2_PIX_FMT_BGR32: + default: + format |= LDBBSIFR_SWPL; + break; + } + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_RGB565: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB16; + break; + case V4L2_PIX_FMT_BGR24: + format |= LDBBSIFR_AL_1 | LDBBSIFR_RY | LDBBSIFR_RPKF_RGB24; + break; + case V4L2_PIX_FMT_BGR32: + format |= LDBBSIFR_AL_PK | LDBBSIFR_RY | LDDFR_PKF_ARGB32; + break; + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_420; + break; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + format |= LDBBSIFR_AL_1 | LDBBSIFR_CHRR_422; + break; + case V4L2_PIX_FMT_NV24: + case V4L2_PIX_FMT_NV42: + format |= 
LDBBSIFR_AL_1 | LDBBSIFR_CHRR_444; + break; + } + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSIFR(ovl->index), format); + + lcdc_write_overlay(ovl, LDBnBSSZR(ovl->index), + (ovl->yres << LDBBSSZR_BVSS_SHIFT) | + (ovl->xres << LDBBSSZR_BHSS_SHIFT)); + lcdc_write_overlay(ovl, LDBnBLOCR(ovl->index), + (ovl->pos_y << LDBBLOCR_CVLC_SHIFT) | + (ovl->pos_x << LDBBLOCR_CHLC_SHIFT)); + lcdc_write_overlay(ovl, LDBnBSMWR(ovl->index), + ovl->pitch << LDBBSMWR_BSMW_SHIFT); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); +} + /* - * __sh_mobile_lcdc_start - Configure and tart the LCDC + * __sh_mobile_lcdc_start - Configure and start the LCDC * @priv: LCDC device * * Configure all enabled channels and start the LCDC device. All external @@ -839,27 +1104,25 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) /* Compute frame buffer base address and pitch for each channel. */ for (k = 0; k < ARRAY_SIZE(priv->ch); k++) { int pixelformat; - void *meram; + void *cache; ch = &priv->ch[k]; if (!ch->enabled) continue; ch->base_addr_y = ch->dma_handle; - ch->base_addr_c = ch->base_addr_y + ch->xres * ch->yres_virtual; + ch->base_addr_c = ch->dma_handle + + ch->xres_virtual * ch->yres_virtual; ch->line_size = ch->pitch; /* Enable MERAM if possible. */ - if (mdev == NULL || mdev->ops == NULL || - ch->cfg->meram_cfg == NULL) + if (mdev == NULL || ch->cfg->meram_cfg == NULL) continue; - /* we need to de-init configured ICBs before we can - * re-initialize them. - */ - if (ch->meram) { - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = NULL; + /* Free the allocated MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(mdev, ch->cache); + ch->cache = NULL; } switch (ch->format->fourcc) { @@ -881,17 +1144,22 @@ static int sh_mobile_lcdc_start(struct sh_mobile_lcdc_priv *priv) break; } - meram = mdev->ops->meram_register(mdev, ch->cfg->meram_cfg, + cache = sh_mobile_meram_cache_alloc(mdev, ch->cfg->meram_cfg, ch->pitch, ch->yres, pixelformat, &ch->line_size); - if (!IS_ERR(meram)) { - mdev->ops->meram_update(mdev, meram, + if (!IS_ERR(cache)) { + sh_mobile_meram_cache_update(mdev, cache, ch->base_addr_y, ch->base_addr_c, &ch->base_addr_y, &ch->base_addr_c); - ch->meram = meram; + ch->cache = cache; } } + for (k = 0; k < ARRAY_SIZE(priv->overlays); ++k) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[k]; + sh_mobile_lcdc_overlay_setup(ovl); + } + /* Start the LCDC. */ __sh_mobile_lcdc_start(priv); @@ -953,12 +1221,10 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_display_off(ch); - /* disable the meram */ - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - mdev = priv->meram_dev; - mdev->ops->meram_unregister(mdev, ch->meram); - ch->meram = 0; + /* Free the MERAM cache. */ + if (ch->cache) { + sh_mobile_meram_cache_free(priv->meram_dev, ch->cache); + ch->cache = 0; } } @@ -975,8 +1241,511 @@ static void sh_mobile_lcdc_stop(struct sh_mobile_lcdc_priv *priv) sh_mobile_lcdc_clk_off(priv); } +static int __sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + if (var->xres > MAX_XRES || var->yres > MAX_YRES) + return -EINVAL; + + /* Make sure the virtual resolution is at least as big as the visible + * resolution. 
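+ * As a worked example (panel size assumed, not taken from this patch):
+ * an 800x480 panel double-buffered vertically needs xres_virtual = 800
+ * and yres_virtual = 960, so the smem_len check below requires
+ * 800 * 960 * 32 / 8 = 3072000 bytes of allocated memory at 32bpp.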
+ */ + if (var->xres_virtual < var->xres) + var->xres_virtual = var->xres; + if (var->yres_virtual < var->yres) + var->yres_virtual = var->yres; + + if (sh_mobile_format_is_fourcc(var)) { + const struct sh_mobile_lcdc_format_info *format; + + format = sh_mobile_format_info(var->grayscale); + if (format == NULL) + return -EINVAL; + var->bits_per_pixel = format->bpp; + + /* Default to RGB and JPEG color-spaces for RGB and YUV formats + * respectively. + */ + if (!format->yuv) + var->colorspace = V4L2_COLORSPACE_SRGB; + else if (var->colorspace != V4L2_COLORSPACE_REC709) + var->colorspace = V4L2_COLORSPACE_JPEG; + } else { + if (var->bits_per_pixel <= 16) { /* RGB 565 */ + var->bits_per_pixel = 16; + var->red.offset = 11; + var->red.length = 5; + var->green.offset = 5; + var->green.length = 6; + var->blue.offset = 0; + var->blue.length = 5; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ + var->bits_per_pixel = 24; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 0; + var->transp.length = 0; + } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ + var->bits_per_pixel = 32; + var->red.offset = 16; + var->red.length = 8; + var->green.offset = 8; + var->green.length = 8; + var->blue.offset = 0; + var->blue.length = 8; + var->transp.offset = 24; + var->transp.length = 8; + } else + return -EINVAL; + + var->red.msb_right = 0; + var->green.msb_right = 0; + var->blue.msb_right = 0; + var->transp.msb_right = 0; + } + + /* Make sure we don't exceed our allocated memory. */ + if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > + info->fix.smem_len) + return -EINVAL; + + return 0; +} + /* ----------------------------------------------------------------------------- - * Frame buffer operations + * Frame buffer operations - Overlays + */ + +static ssize_t +overlay_alpha_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->alpha); +} + +static ssize_t +overlay_alpha_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int alpha; + char *endp; + + alpha = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (alpha > 255) + return -EINVAL; + + if (ovl->alpha != alpha) { + ovl->alpha = alpha; + + if (ovl->mode == LCDC_OVERLAY_BLEND && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_mode_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->mode); +} + +static ssize_t +overlay_mode_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int mode; + char *endp; + + mode = simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (mode != LCDC_OVERLAY_BLEND && mode != LCDC_OVERLAY_ROP3) + return -EINVAL; + + if (ovl->mode != mode) { 
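+ /* Latch the new mode; an overlay that is currently enabled is
+ * reprogrammed immediately below. */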
+ ovl->mode = mode; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_position_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%d,%d\n", ovl->pos_x, ovl->pos_y); +} + +static ssize_t +overlay_position_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + char *endp; + int pos_x; + int pos_y; + + pos_x = simple_strtol(buf, &endp, 10); + if (*endp != ',') + return -EINVAL; + + pos_y = simple_strtol(endp + 1, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (ovl->pos_x != pos_x || ovl->pos_y != pos_y) { + ovl->pos_x = pos_x; + ovl->pos_y = pos_y; + + if (ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static ssize_t +overlay_rop3_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + + return scnprintf(buf, PAGE_SIZE, "%u\n", ovl->rop3); +} + +static ssize_t +overlay_rop3_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct fb_info *info = dev_get_drvdata(dev); + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned int rop3; + char *endp; + + rop3 = !!simple_strtoul(buf, &endp, 10); + if (isspace(*endp)) + endp++; + + if (endp - buf != count) + return -EINVAL; + + if (rop3 > 255) + return -EINVAL; + + if (ovl->rop3 != rop3) { + ovl->rop3 = rop3; + + if (ovl->mode == LCDC_OVERLAY_ROP3 && ovl->enabled) + sh_mobile_lcdc_overlay_setup(ovl); + } + + return count; +} + +static const struct device_attribute overlay_sysfs_attrs[] = { + __ATTR(ovl_alpha, S_IRUGO|S_IWUSR, + overlay_alpha_show, overlay_alpha_store), + __ATTR(ovl_mode, S_IRUGO|S_IWUSR, + overlay_mode_show, overlay_mode_store), + __ATTR(ovl_position, S_IRUGO|S_IWUSR, + overlay_position_show, overlay_position_store), + __ATTR(ovl_rop3, S_IRUGO|S_IWUSR, + overlay_rop3_show, overlay_rop3_store), +}; + +static const struct fb_fix_screeninfo sh_mobile_lcdc_overlay_fix = { + .id = "SH Mobile LCDC", + .type = FB_TYPE_PACKED_PIXELS, + .visual = FB_VISUAL_TRUECOLOR, + .accel = FB_ACCEL_NONE, + .xpanstep = 1, + .ypanstep = 1, + .ywrapstep = 0, + .capabilities = FB_CAP_FOURCC, +}; + +static int sh_mobile_lcdc_overlay_pan(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + unsigned long base_addr_y; + unsigned long base_addr_c; + unsigned long y_offset; + unsigned long c_offset; + + if (!ovl->format->yuv) { + y_offset = (var->yoffset * ovl->xres_virtual + var->xoffset) + * ovl->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ovl->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ovl->format->bpp < 16 ? 2 : 1; + + y_offset = var->yoffset * ovl->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ovl->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } + + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
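+ * As an illustration of the offset math above (sizes assumed): for an
+ * 800-pixel-wide NV12 overlay (xsub = ysub = 2), panning to
+ * yoffset = 480 with xoffset = 0 gives y_offset = 480 * 800 = 384000
+ * and c_offset = 480 / 2 * 800 * 2 / 2 = 192000 bytes.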
+ */ + if (y_offset == ovl->pan_y_offset) + return 0; + + /* Set the source address for the next refresh */ + base_addr_y = ovl->dma_handle + y_offset; + base_addr_c = ovl->dma_handle + ovl->xres_virtual * ovl->yres_virtual + + c_offset; + + ovl->base_addr_y = base_addr_y; + ovl->base_addr_c = base_addr_c; + ovl->pan_y_offset = y_offset; + + lcdc_write(ovl->channel->lcdc, LDBCR, LDBCR_UPC(ovl->index)); + + lcdc_write_overlay(ovl, LDBnBSAYR(ovl->index), ovl->base_addr_y); + lcdc_write_overlay(ovl, LDBnBSACR(ovl->index), ovl->base_addr_c); + + lcdc_write(ovl->channel->lcdc, LDBCR, + LDBCR_UPF(ovl->index) | LDBCR_UPD(ovl->index)); + + return 0; +} + +static int sh_mobile_lcdc_overlay_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + switch (cmd) { + case FBIO_WAITFORVSYNC: + return sh_mobile_lcdc_wait_for_vsync(ovl->channel); + + default: + return -ENOIOCTLCMD; + } +} + +static int sh_mobile_lcdc_overlay_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) +{ + return __sh_mobile_lcdc_check_var(var, info); +} + +static int sh_mobile_lcdc_overlay_set_par(struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->format = + sh_mobile_format_info(sh_mobile_format_fourcc(&info->var)); + + ovl->xres = info->var.xres; + ovl->xres_virtual = info->var.xres_virtual; + ovl->yres = info->var.yres; + ovl->yres_virtual = info->var.yres_virtual; + + if (ovl->format->yuv) + ovl->pitch = info->var.xres_virtual; + else + ovl->pitch = info->var.xres_virtual * ovl->format->bpp / 8; + + sh_mobile_lcdc_overlay_setup(ovl); + + info->fix.line_length = ovl->pitch; + + if (sh_mobile_format_is_fourcc(&info->var)) { + info->fix.type = FB_TYPE_FOURCC; + info->fix.visual = FB_VISUAL_FOURCC; + } else { + info->fix.type = FB_TYPE_PACKED_PIXELS; + info->fix.visual = FB_VISUAL_TRUECOLOR; + } + + return 0; +} + +/* Overlay blanking. Disable the overlay when blanked. */ +static int sh_mobile_lcdc_overlay_blank(int blank, struct fb_info *info) +{ + struct sh_mobile_lcdc_overlay *ovl = info->par; + + ovl->enabled = !blank; + sh_mobile_lcdc_overlay_setup(ovl); + + /* Prevent the backlight from receiving a blanking event by returning + * a non-zero value. 
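+ * For instance, a hypothetical userspace test could toggle the overlay
+ * through the standard fbdev blanking ioctl (device node name assumed):
+ *
+ *	int fd = open("/dev/fb1", O_RDWR);
+ *	ioctl(fd, FBIOBLANK, FB_BLANK_NORMAL);	/* overlay hidden */
+ *	ioctl(fd, FBIOBLANK, FB_BLANK_UNBLANK);	/* overlay shown */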
+ */ + return 1; +} + +static struct fb_ops sh_mobile_lcdc_overlay_ops = { + .owner = THIS_MODULE, + .fb_read = fb_sys_read, + .fb_write = fb_sys_write, + .fb_fillrect = sys_fillrect, + .fb_copyarea = sys_copyarea, + .fb_imageblit = sys_imageblit, + .fb_blank = sh_mobile_lcdc_overlay_blank, + .fb_pan_display = sh_mobile_lcdc_overlay_pan, + .fb_ioctl = sh_mobile_lcdc_overlay_ioctl, + .fb_check_var = sh_mobile_lcdc_overlay_check_var, + .fb_set_par = sh_mobile_lcdc_overlay_set_par, +}; + +static void +sh_mobile_lcdc_overlay_fb_unregister(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->dev == NULL) + return; + + unregister_framebuffer(ovl->info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_register(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *lcdc = ovl->channel->lcdc; + struct fb_info *info = ovl->info; + unsigned int i; + int ret; + + if (info == NULL) + return 0; + + ret = register_framebuffer(info); + if (ret < 0) + return ret; + + dev_info(lcdc->dev, "registered %s/overlay %u as %dx%d %dbpp.\n", + dev_name(lcdc->dev), ovl->index, info->var.xres, + info->var.yres, info->var.bits_per_pixel); + + for (i = 0; i < ARRAY_SIZE(overlay_sysfs_attrs); ++i) { + ret = device_create_file(info->dev, &overlay_sysfs_attrs[i]); + if (ret < 0) + return ret; + } + + return 0; +} + +static void +sh_mobile_lcdc_overlay_fb_cleanup(struct sh_mobile_lcdc_overlay *ovl) +{ + struct fb_info *info = ovl->info; + + if (info == NULL || info->device == NULL) + return; + + framebuffer_release(info); +} + +static int __devinit +sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl) +{ + struct sh_mobile_lcdc_priv *priv = ovl->channel->lcdc; + struct fb_var_screeninfo *var; + struct fb_info *info; + + /* Allocate and initialize the frame buffer device. */ + info = framebuffer_alloc(0, priv->dev); + if (info == NULL) { + dev_err(priv->dev, "unable to allocate fb_info\n"); + return -ENOMEM; + } + + ovl->info = info; + + info->flags = FBINFO_FLAG_DEFAULT; + info->fbops = &sh_mobile_lcdc_overlay_ops; + info->device = priv->dev; + info->screen_base = ovl->fb_mem; + info->par = ovl; + + /* Initialize fixed screen information. Restrict pan to 2 lines steps + * for NV12 and NV21. + */ + info->fix = sh_mobile_lcdc_overlay_fix; + snprintf(info->fix.id, sizeof(info->fix.id), + "SH Mobile LCDC Overlay %u", ovl->index); + info->fix.smem_start = ovl->dma_handle; + info->fix.smem_len = ovl->fb_size; + info->fix.line_length = ovl->pitch; + + if (ovl->format->yuv) + info->fix.visual = FB_VISUAL_FOURCC; + else + info->fix.visual = FB_VISUAL_TRUECOLOR; + + switch (ovl->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: + info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } + + /* Initialize variable screen information. */ + var = &info->var; + memset(var, 0, sizeof(*var)); + var->xres = ovl->xres; + var->yres = ovl->yres; + var->xres_virtual = ovl->xres_virtual; + var->yres_virtual = ovl->yres_virtual; + var->activate = FB_ACTIVATE_NOW; + + /* Use the legacy API by default for RGB formats, and the FOURCC API + * for YUV formats. 
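+ * A minimal sketch of selecting a YUV format from userspace through the
+ * FOURCC-based API (FB_CAP_FOURCC is advertised in the fix above):
+ *
+ *	struct fb_var_screeninfo var;
+ *	ioctl(fd, FBIOGET_VSCREENINFO, &var);
+ *	var.grayscale = V4L2_PIX_FMT_NV12;	/* fourcc in grayscale */
+ *	ioctl(fd, FBIOPUT_VSCREENINFO, &var);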
+ */ + if (!ovl->format->yuv) + var->bits_per_pixel = ovl->format->bpp; + else + var->grayscale = ovl->format->fourcc; + + return sh_mobile_lcdc_overlay_check_var(var, info); +} + +/* ----------------------------------------------------------------------------- + * Frame buffer operations - main frame buffer */ static int sh_mobile_lcdc_setcolreg(u_int regno, @@ -1003,12 +1772,12 @@ static int sh_mobile_lcdc_setcolreg(u_int regno, return 0; } -static struct fb_fix_screeninfo sh_mobile_lcdc_fix = { +static const struct fb_fix_screeninfo sh_mobile_lcdc_fix = { .id = "SH Mobile LCDC", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .accel = FB_ACCEL_NONE, - .xpanstep = 0, + .xpanstep = 1, .ypanstep = 1, .ywrapstep = 0, .capabilities = FB_CAP_FOURCC, @@ -1035,78 +1804,74 @@ static void sh_mobile_lcdc_imageblit(struct fb_info *info, sh_mobile_lcdc_deferred_io_touch(info); } -static int sh_mobile_fb_pan_display(struct fb_var_screeninfo *var, - struct fb_info *info) +static int sh_mobile_lcdc_pan(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *priv = ch->lcdc; unsigned long ldrcntr; - unsigned long new_pan_offset; unsigned long base_addr_y, base_addr_c; + unsigned long y_offset; unsigned long c_offset; - if (!ch->format->yuv) - new_pan_offset = var->yoffset * ch->pitch - + var->xoffset * (ch->format->bpp / 8); - else - new_pan_offset = var->yoffset * ch->pitch + var->xoffset; + if (!ch->format->yuv) { + y_offset = (var->yoffset * ch->xres_virtual + var->xoffset) + * ch->format->bpp / 8; + c_offset = 0; + } else { + unsigned int xsub = ch->format->bpp < 24 ? 2 : 1; + unsigned int ysub = ch->format->bpp < 16 ? 2 : 1; - if (new_pan_offset == ch->pan_offset) - return 0; /* No change, do nothing */ + y_offset = var->yoffset * ch->xres_virtual + var->xoffset; + c_offset = var->yoffset / ysub * ch->xres_virtual * 2 / xsub + + var->xoffset * 2 / xsub; + } - ldrcntr = lcdc_read(priv, _LDRCNTR); + /* If the Y offset hasn't changed, the C offset hasn't either. There's + * nothing to do in that case. 
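+ * With the default yres_virtual = 2 * yres chosen at init time, a
+ * hypothetical client can double-buffer by panning between the two
+ * halves of the virtual frame buffer:
+ *
+ *	int crtc = 0;
+ *	var.yoffset = (frame & 1) ? var.yres : 0;
+ *	ioctl(fd, FBIOPAN_DISPLAY, &var);
+ *	ioctl(fd, FBIO_WAITFORVSYNC, &crtc);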
+ */ + if (y_offset == ch->pan_y_offset) + return 0; /* Set the source address for the next refresh */ - base_addr_y = ch->dma_handle + new_pan_offset; - if (ch->format->yuv) { - /* Set y offset */ - c_offset = var->yoffset * ch->pitch - * (ch->format->bpp - 8) / 8; - base_addr_c = ch->dma_handle + ch->xres * ch->yres_virtual - + c_offset; - /* Set x offset */ - if (ch->format->fourcc == V4L2_PIX_FMT_NV24) - base_addr_c += 2 * var->xoffset; - else - base_addr_c += var->xoffset; - } + base_addr_y = ch->dma_handle + y_offset; + base_addr_c = ch->dma_handle + ch->xres_virtual * ch->yres_virtual + + c_offset; - if (ch->meram) { - struct sh_mobile_meram_info *mdev; - - mdev = priv->meram_dev; - mdev->ops->meram_update(mdev, ch->meram, - base_addr_y, base_addr_c, - &base_addr_y, &base_addr_c); - } + if (ch->cache) + sh_mobile_meram_cache_update(priv->meram_dev, ch->cache, + base_addr_y, base_addr_c, + &base_addr_y, &base_addr_c); ch->base_addr_y = base_addr_y; ch->base_addr_c = base_addr_c; + ch->pan_y_offset = y_offset; lcdc_write_chan_mirror(ch, LDSA1R, base_addr_y); if (ch->format->yuv) lcdc_write_chan_mirror(ch, LDSA2R, base_addr_c); + ldrcntr = lcdc_read(priv, _LDRCNTR); if (lcdc_chan_is_sublcd(ch)) lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_SRS); else lcdc_write(ch->lcdc, _LDRCNTR, ldrcntr ^ LDRCNTR_MRS); - ch->pan_offset = new_pan_offset; sh_mobile_lcdc_deferred_io_touch(info); return 0; } -static int sh_mobile_ioctl(struct fb_info *info, unsigned int cmd, - unsigned long arg) +static int sh_mobile_lcdc_ioctl(struct fb_info *info, unsigned int cmd, + unsigned long arg) { + struct sh_mobile_lcdc_chan *ch = info->par; int retval; switch (cmd) { case FBIO_WAITFORVSYNC: - retval = sh_mobile_wait_for_vsync(info->par); + retval = sh_mobile_lcdc_wait_for_vsync(ch); break; default: @@ -1158,7 +1923,7 @@ static void sh_mobile_fb_reconfig(struct fb_info *info) * Locking: both .fb_release() and .fb_open() are called with info->lock held if * user == 1, or with console sem held, if user == 0. */ -static int sh_mobile_release(struct fb_info *info, int user) +static int sh_mobile_lcdc_release(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1179,7 +1944,7 @@ static int sh_mobile_release(struct fb_info *info, int user) return 0; } -static int sh_mobile_open(struct fb_info *info, int user) +static int sh_mobile_lcdc_open(struct fb_info *info, int user) { struct sh_mobile_lcdc_chan *ch = info->par; @@ -1192,7 +1957,8 @@ static int sh_mobile_open(struct fb_info *info, int user) return 0; } -static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *info) +static int sh_mobile_lcdc_check_var(struct fb_var_screeninfo *var, + struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; struct sh_mobile_lcdc_priv *p = ch->lcdc; @@ -1200,9 +1966,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in unsigned int best_xres = 0; unsigned int best_yres = 0; unsigned int i; - - if (var->xres > MAX_XRES || var->yres > MAX_YRES) - return -EINVAL; + int ret; /* If board code provides us with a list of available modes, make sure * we use one of them. Find the mode closest to the requested one. The @@ -1237,73 +2001,9 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in var->yres = best_yres; } - /* Make sure the virtual resolution is at least as big as the visible - * resolution. 
- */ - if (var->xres_virtual < var->xres) - var->xres_virtual = var->xres; - if (var->yres_virtual < var->yres) - var->yres_virtual = var->yres; - - if (sh_mobile_format_is_fourcc(var)) { - const struct sh_mobile_lcdc_format_info *format; - - format = sh_mobile_format_info(var->grayscale); - if (format == NULL) - return -EINVAL; - var->bits_per_pixel = format->bpp; - - /* Default to RGB and JPEG color-spaces for RGB and YUV formats - * respectively. - */ - if (!format->yuv) - var->colorspace = V4L2_COLORSPACE_SRGB; - else if (var->colorspace != V4L2_COLORSPACE_REC709) - var->colorspace = V4L2_COLORSPACE_JPEG; - } else { - if (var->bits_per_pixel <= 16) { /* RGB 565 */ - var->bits_per_pixel = 16; - var->red.offset = 11; - var->red.length = 5; - var->green.offset = 5; - var->green.length = 6; - var->blue.offset = 0; - var->blue.length = 5; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 24) { /* RGB 888 */ - var->bits_per_pixel = 24; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 0; - var->transp.length = 0; - } else if (var->bits_per_pixel <= 32) { /* RGBA 888 */ - var->bits_per_pixel = 32; - var->red.offset = 16; - var->red.length = 8; - var->green.offset = 8; - var->green.length = 8; - var->blue.offset = 0; - var->blue.length = 8; - var->transp.offset = 24; - var->transp.length = 8; - } else - return -EINVAL; - - var->red.msb_right = 0; - var->green.msb_right = 0; - var->blue.msb_right = 0; - var->transp.msb_right = 0; - } - - /* Make sure we don't exceed our allocated memory. */ - if (var->xres_virtual * var->yres_virtual * var->bits_per_pixel / 8 > - info->fix.smem_len) - return -EINVAL; + ret = __sh_mobile_lcdc_check_var(var, info); + if (ret < 0) + return ret; /* only accept the forced_fourcc for dual channel configurations */ if (p->forced_fourcc && @@ -1313,7 +2013,7 @@ static int sh_mobile_check_var(struct fb_var_screeninfo *var, struct fb_info *in return 0; } -static int sh_mobile_set_par(struct fb_info *info) +static int sh_mobile_lcdc_set_par(struct fb_info *info) { struct sh_mobile_lcdc_chan *ch = info->par; int ret; @@ -1329,9 +2029,9 @@ static int sh_mobile_set_par(struct fb_info *info) ch->yres_virtual = info->var.yres_virtual; if (ch->format->yuv) - ch->pitch = info->var.xres; + ch->pitch = info->var.xres_virtual; else - ch->pitch = info->var.xres * ch->format->bpp / 8; + ch->pitch = info->var.xres_virtual * ch->format->bpp / 8; ret = sh_mobile_lcdc_start(ch->lcdc); if (ret < 0) @@ -1383,8 +2083,8 @@ static int sh_mobile_lcdc_blank(int blank, struct fb_info *info) * mode will reenable the clocks and update the screen in time, * so it does not need this. 
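 * Waiting twice is deliberate: the first wait may return almost
 * immediately if a frame is already mid-scanout, so only the second
 * wait guarantees that a complete frame has been scanned out to the
 * panel before the clocks are gated.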
*/ if (!info->fbdefio) { - sh_mobile_wait_for_vsync(ch); - sh_mobile_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); + sh_mobile_lcdc_wait_for_vsync(ch); } sh_mobile_lcdc_clk_off(p); } @@ -1402,12 +2102,12 @@ static struct fb_ops sh_mobile_lcdc_ops = { .fb_copyarea = sh_mobile_lcdc_copyarea, .fb_imageblit = sh_mobile_lcdc_imageblit, .fb_blank = sh_mobile_lcdc_blank, - .fb_pan_display = sh_mobile_fb_pan_display, - .fb_ioctl = sh_mobile_ioctl, - .fb_open = sh_mobile_open, - .fb_release = sh_mobile_release, - .fb_check_var = sh_mobile_check_var, - .fb_set_par = sh_mobile_set_par, + .fb_pan_display = sh_mobile_lcdc_pan, + .fb_ioctl = sh_mobile_lcdc_ioctl, + .fb_open = sh_mobile_lcdc_open, + .fb_release = sh_mobile_lcdc_release, + .fb_check_var = sh_mobile_lcdc_check_var, + .fb_set_par = sh_mobile_lcdc_set_par, }; static void @@ -1514,19 +2214,24 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else info->fix.visual = FB_VISUAL_TRUECOLOR; - if (ch->format->fourcc == V4L2_PIX_FMT_NV12 || - ch->format->fourcc == V4L2_PIX_FMT_NV21) + switch (ch->format->fourcc) { + case V4L2_PIX_FMT_NV12: + case V4L2_PIX_FMT_NV21: info->fix.ypanstep = 2; + case V4L2_PIX_FMT_NV16: + case V4L2_PIX_FMT_NV61: + info->fix.xpanstep = 2; + } /* Initialize variable screen information using the first mode as - * default. The default Y virtual resolution is twice the panel size to - * allow for double-buffering. + * default. */ var = &info->var; fb_videomode_to_var(var, mode); var->width = ch->cfg->panel_cfg.width; var->height = ch->cfg->panel_cfg.height; - var->yres_virtual = var->yres * 2; + var->xres_virtual = ch->xres_virtual; + var->yres_virtual = ch->yres_virtual; var->activate = FB_ACTIVATE_NOW; /* Use the legacy API by default for RGB formats, and the FOURCC API @@ -1537,7 +2242,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch, else var->grayscale = ch->format->fourcc; - ret = sh_mobile_check_var(var, info); + ret = sh_mobile_lcdc_check_var(var, info); if (ret) return ret; @@ -1712,15 +2417,27 @@ static const struct fb_videomode default_720p __devinitconst = { static int sh_mobile_lcdc_remove(struct platform_device *pdev) { struct sh_mobile_lcdc_priv *priv = platform_get_drvdata(pdev); - int i; + unsigned int i; fb_unregister_client(&priv->notifier); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) + sh_mobile_lcdc_overlay_fb_unregister(&priv->overlays[i]); for (i = 0; i < ARRAY_SIZE(priv->ch); i++) sh_mobile_lcdc_channel_fb_unregister(&priv->ch[i]); sh_mobile_lcdc_stop(priv); + for (i = 0; i < ARRAY_SIZE(priv->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + sh_mobile_lcdc_overlay_fb_cleanup(ovl); + + if (ovl->fb_mem) + dma_free_coherent(&pdev->dev, ovl->fb_size, + ovl->fb_mem, ovl->dma_handle); + } + for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; @@ -1737,8 +2454,11 @@ static int sh_mobile_lcdc_remove(struct platform_device *pdev) } for (i = 0; i < ARRAY_SIZE(priv->ch); i++) { - if (priv->ch[i].bl) - sh_mobile_lcdc_bl_remove(priv->ch[i].bl); + struct sh_mobile_lcdc_chan *ch = &priv->ch[i]; + + if (ch->bl) + sh_mobile_lcdc_bl_remove(ch->bl); + mutex_destroy(&ch->open_lock); } if (priv->dot_clk) { @@ -1796,6 +2516,61 @@ static int __devinit sh_mobile_lcdc_check_interface(struct sh_mobile_lcdc_chan * } static int __devinit +sh_mobile_lcdc_overlay_init(struct sh_mobile_lcdc_priv *priv, + struct sh_mobile_lcdc_overlay *ovl) +{ + const struct sh_mobile_lcdc_format_info *format; + int ret; + + if 
(ovl->cfg->fourcc == 0) + return 0; + + /* Validate the format. */ + format = sh_mobile_format_info(ovl->cfg->fourcc); + if (format == NULL) { + dev_err(priv->dev, "Invalid FOURCC %08x\n", ovl->cfg->fourcc); + return -EINVAL; + } + + ovl->enabled = false; + ovl->mode = LCDC_OVERLAY_BLEND; + ovl->alpha = 255; + ovl->rop3 = 0; + ovl->pos_x = 0; + ovl->pos_y = 0; + + /* The default Y virtual resolution is twice the panel size to allow for + * double-buffering. + */ + ovl->format = format; + ovl->xres = ovl->cfg->max_xres; + ovl->xres_virtual = ovl->xres; + ovl->yres = ovl->cfg->max_yres; + ovl->yres_virtual = ovl->yres * 2; + + if (!format->yuv) + ovl->pitch = ovl->xres_virtual * format->bpp / 8; + else + ovl->pitch = ovl->xres_virtual; + + /* Allocate frame buffer memory. */ + ovl->fb_size = ovl->cfg->max_xres * ovl->cfg->max_yres + * format->bpp / 8 * 2; + ovl->fb_mem = dma_alloc_coherent(priv->dev, ovl->fb_size, + &ovl->dma_handle, GFP_KERNEL); + if (!ovl->fb_mem) { + dev_err(priv->dev, "unable to allocate buffer\n"); + return -ENOMEM; + } + + ret = sh_mobile_lcdc_overlay_fb_init(ovl); + if (ret < 0) + return ret; + + return 0; +} + +static int __devinit sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, struct sh_mobile_lcdc_chan *ch) { @@ -1854,7 +2629,9 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, num_modes = cfg->num_modes; } - /* Use the first mode as default. */ + /* Use the first mode as default. The default Y virtual resolution is + * twice the panel size to allow for double-buffering. + */ ch->format = format; ch->xres = mode->xres; ch->xres_virtual = mode->xres; @@ -1863,10 +2640,10 @@ sh_mobile_lcdc_channel_init(struct sh_mobile_lcdc_priv *priv, if (!format->yuv) { ch->colorspace = V4L2_COLORSPACE_SRGB; - ch->pitch = ch->xres * format->bpp / 8; + ch->pitch = ch->xres_virtual * format->bpp / 8; } else { ch->colorspace = V4L2_COLORSPACE_REC709; - ch->pitch = ch->xres; + ch->pitch = ch->xres_virtual; } ch->display.width = cfg->panel_cfg.width; @@ -1952,7 +2729,6 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) } init_waitqueue_head(&ch->frame_end_wait); init_completion(&ch->vsync_completion); - ch->pan_offset = 0; /* probe the backlight is there is one defined */ if (ch->cfg->bl_info.max_brightness) @@ -2003,6 +2779,17 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + ovl->cfg = &pdata->overlays[i]; + ovl->channel = &priv->ch[0]; + + error = sh_mobile_lcdc_overlay_init(priv, ovl); + if (error) + goto err1; + } + error = sh_mobile_lcdc_start(priv); if (error) { dev_err(&pdev->dev, "unable to start hardware\n"); @@ -2017,6 +2804,14 @@ static int __devinit sh_mobile_lcdc_probe(struct platform_device *pdev) goto err1; } + for (i = 0; i < ARRAY_SIZE(pdata->overlays); i++) { + struct sh_mobile_lcdc_overlay *ovl = &priv->overlays[i]; + + error = sh_mobile_lcdc_overlay_fb_register(ovl); + if (error) + goto err1; + } + /* Failure ignored */ priv->notifier.notifier_call = sh_mobile_lcdc_notify; fb_register_client(&priv->notifier); diff --git a/drivers/video/sh_mobile_lcdcfb.h b/drivers/video/sh_mobile_lcdcfb.h index 5c3bddd2cb72..0f92f6544b94 100644 --- a/drivers/video/sh_mobile_lcdcfb.h +++ b/drivers/video/sh_mobile_lcdcfb.h @@ -47,6 +47,7 @@ struct sh_mobile_lcdc_entity { /* * struct sh_mobile_lcdc_chan - LCDC display channel * + * @pan_y_offset: Panning linear offset in bytes 
(luma component) * @base_addr_y: Frame buffer viewport base address (luma component) * @base_addr_c: Frame buffer viewport base address (chroma component) * @pitch: Frame buffer line pitch @@ -59,7 +60,7 @@ struct sh_mobile_lcdc_chan { unsigned long *reg_offs; unsigned long ldmt1r_value; unsigned long enabled; /* ME and SE in LDCNT2R */ - void *meram; + void *cache; struct mutex open_lock; /* protects the use counter */ int use_count; @@ -68,7 +69,7 @@ struct sh_mobile_lcdc_chan { unsigned long fb_size; dma_addr_t dma_handle; - unsigned long pan_offset; + unsigned long pan_y_offset; unsigned long frame_end; wait_queue_head_t frame_end_wait; diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c index 82ba830bf95d..7a0ba8bb3fbe 100644 --- a/drivers/video/sh_mobile_meram.c +++ b/drivers/video/sh_mobile_meram.c @@ -11,6 +11,7 @@ #include <linux/device.h> #include <linux/err.h> +#include <linux/export.h> #include <linux/genalloc.h> #include <linux/io.h> #include <linux/kernel.h> @@ -194,13 +195,28 @@ static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off) } /* ----------------------------------------------------------------------------- - * Allocation + * MERAM allocation and free + */ + +static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size) +{ + return gen_pool_alloc(priv->pool, size); +} + +static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem, + size_t size) +{ + gen_pool_free(priv->pool, mem, size); +} + +/* ----------------------------------------------------------------------------- + * LCDC cache planes allocation, init, cleanup and free */ /* Allocate ICBs and MERAM for a plane. */ -static int __meram_alloc(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane, - size_t size) +static int meram_plane_alloc(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane, + size_t size) { unsigned long mem; unsigned long idx; @@ -215,7 +231,7 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, return -ENOMEM; plane->marker = &priv->icbs[idx]; - mem = gen_pool_alloc(priv->pool, size * 1024); + mem = meram_alloc(priv, size * 1024); if (mem == 0) return -ENOMEM; @@ -229,11 +245,11 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv, } /* Free ICBs and MERAM for a plane. */ -static void __meram_free(struct sh_mobile_meram_priv *priv, - struct sh_mobile_meram_fb_plane *plane) +static void meram_plane_free(struct sh_mobile_meram_priv *priv, + struct sh_mobile_meram_fb_plane *plane) { - gen_pool_free(priv->pool, priv->meram + plane->marker->offset, - plane->marker->size * 1024); + meram_free(priv, priv->meram + plane->marker->offset, + plane->marker->size * 1024); __clear_bit(plane->marker->index, &priv->used_icb); __clear_bit(plane->cache->index, &priv->used_icb); @@ -248,62 +264,6 @@ static int is_nvcolor(int cspace) return 0; } -/* Allocate memory for the ICBs and mark them as used. */ -static struct sh_mobile_meram_fb_cache * -meram_alloc(struct sh_mobile_meram_priv *priv, - const struct sh_mobile_meram_cfg *cfg, - int pixelformat) -{ - struct sh_mobile_meram_fb_cache *cache; - unsigned int nplanes = is_nvcolor(pixelformat) ? 
diff --git a/drivers/video/sh_mobile_meram.c b/drivers/video/sh_mobile_meram.c
index 82ba830bf95d..7a0ba8bb3fbe 100644
--- a/drivers/video/sh_mobile_meram.c
+++ b/drivers/video/sh_mobile_meram.c
@@ -11,6 +11,7 @@
 
 #include <linux/device.h>
 #include <linux/err.h>
+#include <linux/export.h>
 #include <linux/genalloc.h>
 #include <linux/io.h>
 #include <linux/kernel.h>
@@ -194,13 +195,28 @@ static inline unsigned long meram_read_reg(void __iomem *base, unsigned int off)
 }
 
 /* -----------------------------------------------------------------------------
- * Allocation
+ * MERAM allocation and free
+ */
+
+static unsigned long meram_alloc(struct sh_mobile_meram_priv *priv, size_t size)
+{
+	return gen_pool_alloc(priv->pool, size);
+}
+
+static void meram_free(struct sh_mobile_meram_priv *priv, unsigned long mem,
+		       size_t size)
+{
+	gen_pool_free(priv->pool, mem, size);
+}
+
+/* -----------------------------------------------------------------------------
+ * LCDC cache planes allocation, init, cleanup and free
  */
 
 /* Allocate ICBs and MERAM for a plane. */
-static int __meram_alloc(struct sh_mobile_meram_priv *priv,
-			 struct sh_mobile_meram_fb_plane *plane,
-			 size_t size)
+static int meram_plane_alloc(struct sh_mobile_meram_priv *priv,
+			     struct sh_mobile_meram_fb_plane *plane,
+			     size_t size)
 {
 	unsigned long mem;
 	unsigned long idx;
@@ -215,7 +231,7 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv,
 		return -ENOMEM;
 	plane->marker = &priv->icbs[idx];
 
-	mem = gen_pool_alloc(priv->pool, size * 1024);
+	mem = meram_alloc(priv, size * 1024);
 	if (mem == 0)
 		return -ENOMEM;
 
@@ -229,11 +245,11 @@ static int __meram_alloc(struct sh_mobile_meram_priv *priv,
 }
 
 /* Free ICBs and MERAM for a plane. */
-static void __meram_free(struct sh_mobile_meram_priv *priv,
-			 struct sh_mobile_meram_fb_plane *plane)
+static void meram_plane_free(struct sh_mobile_meram_priv *priv,
+			     struct sh_mobile_meram_fb_plane *plane)
 {
-	gen_pool_free(priv->pool, priv->meram + plane->marker->offset,
-		      plane->marker->size * 1024);
+	meram_free(priv, priv->meram + plane->marker->offset,
+		   plane->marker->size * 1024);
 
 	__clear_bit(plane->marker->index, &priv->used_icb);
 	__clear_bit(plane->cache->index, &priv->used_icb);
@@ -248,62 +264,6 @@ static int is_nvcolor(int cspace)
 	return 0;
 }
 
-/* Allocate memory for the ICBs and mark them as used. */
-static struct sh_mobile_meram_fb_cache *
-meram_alloc(struct sh_mobile_meram_priv *priv,
-	    const struct sh_mobile_meram_cfg *cfg,
-	    int pixelformat)
-{
-	struct sh_mobile_meram_fb_cache *cache;
-	unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
-	int ret;
-
-	if (cfg->icb[0].meram_size == 0)
-		return ERR_PTR(-EINVAL);
-
-	if (nplanes == 2 && cfg->icb[1].meram_size == 0)
-		return ERR_PTR(-EINVAL);
-
-	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
-	if (cache == NULL)
-		return ERR_PTR(-ENOMEM);
-
-	cache->nplanes = nplanes;
-
-	ret = __meram_alloc(priv, &cache->planes[0], cfg->icb[0].meram_size);
-	if (ret < 0)
-		goto error;
-
-	cache->planes[0].marker->current_reg = 1;
-	cache->planes[0].marker->pixelformat = pixelformat;
-
-	if (cache->nplanes == 1)
-		return cache;
-
-	ret = __meram_alloc(priv, &cache->planes[1], cfg->icb[1].meram_size);
-	if (ret < 0) {
-		__meram_free(priv, &cache->planes[0]);
-		goto error;
-	}
-
-	return cache;
-
-error:
-	kfree(cache);
-	return ERR_PTR(-ENOMEM);
-}
-
-/* Unmark the specified ICB as used. */
-static void meram_free(struct sh_mobile_meram_priv *priv,
-		       struct sh_mobile_meram_fb_cache *cache)
-{
-	__meram_free(priv, &cache->planes[0]);
-	if (cache->nplanes == 2)
-		__meram_free(priv, &cache->planes[1]);
-
-	kfree(cache);
-}
-
 /* Set the next address to fetch. */
 static void meram_set_next_addr(struct sh_mobile_meram_priv *priv,
 				struct sh_mobile_meram_fb_cache *cache,
@@ -355,10 +315,10 @@ meram_get_next_icb_addr(struct sh_mobile_meram_info *pdata,
 	(((x) * (y) + (MERAM_LINE_WIDTH - 1)) & ~(MERAM_LINE_WIDTH - 1))
 
 /* Initialize MERAM. */
-static int meram_init(struct sh_mobile_meram_priv *priv,
-		      struct sh_mobile_meram_fb_plane *plane,
-		      unsigned int xres, unsigned int yres,
-		      unsigned int *out_pitch)
+static int meram_plane_init(struct sh_mobile_meram_priv *priv,
+			    struct sh_mobile_meram_fb_plane *plane,
+			    unsigned int xres, unsigned int yres,
+			    unsigned int *out_pitch)
 {
 	struct sh_mobile_meram_icb *marker = plane->marker;
 	unsigned long total_byte_count = MERAM_CALC_BYTECOUNT(xres, yres);
@@ -427,8 +387,8 @@ static int meram_init(struct sh_mobile_meram_priv *priv,
 	return 0;
 }
 
-static void meram_deinit(struct sh_mobile_meram_priv *priv,
-			 struct sh_mobile_meram_fb_plane *plane)
+static void meram_plane_cleanup(struct sh_mobile_meram_priv *priv,
+				struct sh_mobile_meram_fb_plane *plane)
 {
 	/* disable ICB */
 	meram_write_icb(priv->base, plane->cache->index, MExxCTL,
@@ -441,20 +401,82 @@ static void meram_deinit(struct sh_mobile_meram_priv *priv,
 }
 
 /* -----------------------------------------------------------------------------
- * Registration/unregistration
+ * MERAM operations
  */
 
-static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
-				      const struct sh_mobile_meram_cfg *cfg,
-				      unsigned int xres, unsigned int yres,
-				      unsigned int pixelformat,
-				      unsigned int *pitch)
+unsigned long sh_mobile_meram_alloc(struct sh_mobile_meram_info *pdata,
+				    size_t size)
+{
+	struct sh_mobile_meram_priv *priv = pdata->priv;
+
+	return meram_alloc(priv, size);
+}
+EXPORT_SYMBOL_GPL(sh_mobile_meram_alloc);
+
+void sh_mobile_meram_free(struct sh_mobile_meram_info *pdata, unsigned long mem,
+			  size_t size)
+{
+	struct sh_mobile_meram_priv *priv = pdata->priv;
+
+	meram_free(priv, mem, size);
+}
+EXPORT_SYMBOL_GPL(sh_mobile_meram_free);
+
+/* Allocate memory for the ICBs and mark them as used. */
+static struct sh_mobile_meram_fb_cache *
+meram_cache_alloc(struct sh_mobile_meram_priv *priv,
+		  const struct sh_mobile_meram_cfg *cfg,
+		  int pixelformat)
+{
+	unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
+	struct sh_mobile_meram_fb_cache *cache;
+	int ret;
+
+	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
+	if (cache == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	cache->nplanes = nplanes;
+
+	ret = meram_plane_alloc(priv, &cache->planes[0],
+				cfg->icb[0].meram_size);
+	if (ret < 0)
+		goto error;
+
+	cache->planes[0].marker->current_reg = 1;
+	cache->planes[0].marker->pixelformat = pixelformat;
+
+	if (cache->nplanes == 1)
+		return cache;
+
+	ret = meram_plane_alloc(priv, &cache->planes[1],
+				cfg->icb[1].meram_size);
+	if (ret < 0) {
+		meram_plane_free(priv, &cache->planes[0]);
+		goto error;
+	}
+
+	return cache;
+
+error:
+	kfree(cache);
+	return ERR_PTR(-ENOMEM);
+}
+
+void *sh_mobile_meram_cache_alloc(struct sh_mobile_meram_info *pdata,
+				  const struct sh_mobile_meram_cfg *cfg,
+				  unsigned int xres, unsigned int yres,
+				  unsigned int pixelformat, unsigned int *pitch)
 {
 	struct sh_mobile_meram_fb_cache *cache;
 	struct sh_mobile_meram_priv *priv = pdata->priv;
 	struct platform_device *pdev = pdata->pdev;
+	unsigned int nplanes = is_nvcolor(pixelformat) ? 2 : 1;
 	unsigned int out_pitch;
 
+	if (priv == NULL)
+		return ERR_PTR(-ENODEV);
+
 	if (pixelformat != SH_MOBILE_MERAM_PF_NV &&
 	    pixelformat != SH_MOBILE_MERAM_PF_NV24 &&
 	    pixelformat != SH_MOBILE_MERAM_PF_RGB)
@@ -469,10 +491,16 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (cfg->icb[0].meram_size == 0)
+		return ERR_PTR(-EINVAL);
+
+	if (nplanes == 2 && cfg->icb[1].meram_size == 0)
+		return ERR_PTR(-EINVAL);
+
 	mutex_lock(&priv->lock);
 
 	/* We now register the ICBs and allocate the MERAM regions. */
-	cache = meram_alloc(priv, cfg, pixelformat);
+	cache = meram_cache_alloc(priv, cfg, pixelformat);
 	if (IS_ERR(cache)) {
 		dev_err(&pdev->dev, "MERAM allocation failed (%ld).",
 			PTR_ERR(cache));
@@ -480,42 +508,50 @@ static void *sh_mobile_meram_register(struct sh_mobile_meram_info *pdata,
 	}
 
 	/* initialize MERAM */
-	meram_init(priv, &cache->planes[0], xres, yres, &out_pitch);
+	meram_plane_init(priv, &cache->planes[0], xres, yres, &out_pitch);
 	*pitch = out_pitch;
 	if (pixelformat == SH_MOBILE_MERAM_PF_NV)
-		meram_init(priv, &cache->planes[1], xres, (yres + 1) / 2,
-			   &out_pitch);
+		meram_plane_init(priv, &cache->planes[1],
+				 xres, (yres + 1) / 2, &out_pitch);
 	else if (pixelformat == SH_MOBILE_MERAM_PF_NV24)
-		meram_init(priv, &cache->planes[1], 2 * xres, (yres + 1) / 2,
-			   &out_pitch);
+		meram_plane_init(priv, &cache->planes[1],
+				 2 * xres, (yres + 1) / 2, &out_pitch);
 
 err:
 	mutex_unlock(&priv->lock);
 	return cache;
 }
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_alloc);
 
-static void
-sh_mobile_meram_unregister(struct sh_mobile_meram_info *pdata, void *data)
+void
+sh_mobile_meram_cache_free(struct sh_mobile_meram_info *pdata, void *data)
 {
 	struct sh_mobile_meram_fb_cache *cache = data;
 	struct sh_mobile_meram_priv *priv = pdata->priv;
 
 	mutex_lock(&priv->lock);
 
-	/* deinit & free */
-	meram_deinit(priv, &cache->planes[0]);
-	if (cache->nplanes == 2)
-		meram_deinit(priv, &cache->planes[1]);
+	/* Cleanup and free. */
+	meram_plane_cleanup(priv, &cache->planes[0]);
+	meram_plane_free(priv, &cache->planes[0]);
+
+	if (cache->nplanes == 2) {
+		meram_plane_cleanup(priv, &cache->planes[1]);
+		meram_plane_free(priv, &cache->planes[1]);
+	}
 
-	meram_free(priv, cache);
+	kfree(cache);
 
 	mutex_unlock(&priv->lock);
 }
-
-static void
-sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data,
-		       unsigned long base_addr_y, unsigned long base_addr_c,
-		       unsigned long *icb_addr_y, unsigned long *icb_addr_c)
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_free);
+
+void
+sh_mobile_meram_cache_update(struct sh_mobile_meram_info *pdata, void *data,
+			     unsigned long base_addr_y,
+			     unsigned long base_addr_c,
+			     unsigned long *icb_addr_y,
+			     unsigned long *icb_addr_c)
 {
 	struct sh_mobile_meram_fb_cache *cache = data;
 	struct sh_mobile_meram_priv *priv = pdata->priv;
@@ -527,13 +563,7 @@ sh_mobile_meram_update(struct sh_mobile_meram_info *pdata, void *data,
 
 	mutex_unlock(&priv->lock);
 }
-
-static struct sh_mobile_meram_ops sh_mobile_meram_ops = {
-	.module			= THIS_MODULE,
-	.meram_register		= sh_mobile_meram_register,
-	.meram_unregister	= sh_mobile_meram_unregister,
-	.meram_update		= sh_mobile_meram_update,
-};
+EXPORT_SYMBOL_GPL(sh_mobile_meram_cache_update);
 
 /* -----------------------------------------------------------------------------
  * Power management
@@ -624,7 +654,6 @@ static int __devinit sh_mobile_meram_probe(struct platform_device *pdev)
 	for (i = 0; i < MERAM_ICB_NUM; ++i)
 		priv->icbs[i].index = i;
 
-	pdata->ops = &sh_mobile_meram_ops;
 	pdata->priv = priv;
 	pdata->pdev = pdev;
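With the sh_mobile_meram_ops table removed, clients now call the exported sh_mobile_meram_* functions directly instead of going through function pointers. A hypothetical client-side sketch (the wrapper function is illustrative, not from this patch), assuming the info and config structures come from platform data; error handling trimmed:

/* Sketch: driving the exported MERAM cache API from an LCDC-style client. */
#include <linux/err.h>
#include <video/sh_mobile_meram.h>

static void example_attach_cache(struct sh_mobile_meram_info *meram_dev,
				 const struct sh_mobile_meram_cfg *cfg,
				 unsigned int xres, unsigned int yres,
				 unsigned long base_addr_y)
{
	unsigned long icb_addr_y, icb_addr_c;
	unsigned int pitch;
	void *cache;

	/* Reserve ICBs + MERAM and get the adjusted line pitch back. */
	cache = sh_mobile_meram_cache_alloc(meram_dev, cfg, xres, yres,
					    SH_MOBILE_MERAM_PF_RGB, &pitch);
	if (IS_ERR(cache))
		return;	/* fall back to scanning out of system RAM */

	/* Point the cache at the current scanout buffer; the LCDC then
	 * fetches from the returned ICB address instead of system memory. */
	sh_mobile_meram_cache_update(meram_dev, cache, base_addr_y, 0,
				     &icb_addr_y, &icb_addr_c);

	/* ... later, on channel shutdown: */
	sh_mobile_meram_cache_free(meram_dev, cache);
}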
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index 26f864289498..5533a32c6ca1 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -904,7 +904,7 @@ static ssize_t ufx_ops_write(struct fb_info *info, const char __user *buf,
 	result = fb_sys_write(info, buf, count, ppos);
 
 	if (result > 0) {
-		int start = max((int)(offset / info->fix.line_length) - 1, 0);
+		int start = max((int)(offset / info->fix.line_length), 0);
 		int lines = min((u32)((result / info->fix.line_length) + 1),
 				(u32)info->var.yres);
 
diff --git a/drivers/video/w100fb.c b/drivers/video/w100fb.c
index 90a2e30272ad..2f6b2b835f88 100644
--- a/drivers/video/w100fb.c
+++ b/drivers/video/w100fb.c
@@ -1567,6 +1567,18 @@ static void w100_suspend(u32 mode)
 		val = readl(remapped_regs + mmPLL_CNTL);
 		val |= 0x00000004;  /* bit2=1 */
 		writel(val, remapped_regs + mmPLL_CNTL);
+
+		writel(0x00000000, remapped_regs + mmLCDD_CNTL1);
+		writel(0x00000000, remapped_regs + mmLCDD_CNTL2);
+		writel(0x00000000, remapped_regs + mmGENLCD_CNTL1);
+		writel(0x00000000, remapped_regs + mmGENLCD_CNTL2);
+		writel(0x00000000, remapped_regs + mmGENLCD_CNTL3);
+
+		val = readl(remapped_regs + mmMEM_EXT_CNTL);
+		val |= 0xF0000000;
+		val &= ~(0x00000001);
+		writel(val, remapped_regs + mmMEM_EXT_CNTL);
+
 		writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
 	}
 }
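The smscufx change drops the "- 1" so the damage region starts at the scanline that actually contains the write offset rather than one line earlier, which previously caused an extra line to be retransmitted on every write. A standalone check of the arithmetic (illustrative constants, not driver code):

/* Demo of the damage-start fix: the old "- 1" dirtied one line too many. */
#include <stdio.h>

#define LINE_LENGTH 2048u	/* bytes per scanline, illustrative */

static int damage_start(unsigned long offset, int old_behaviour)
{
	int start = (int)(offset / LINE_LENGTH) - (old_behaviour ? 1 : 0);
	return start < 0 ? 0 : start;	/* clamp, as max(..., 0) does */
}

int main(void)
{
	unsigned long offset = 10 * LINE_LENGTH;	/* write starts at line 10 */

	printf("old start=%d, new start=%d\n",
	       damage_start(offset, 1), damage_start(offset, 0));
	/* prints: old start=9 (one extra line), new start=10 (exact) */
	return 0;
}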