*/
typedef enum {
AHD_FENONE = 0x00000,
- AHD_WIDE = 0x00001,/* Wide Channel */
+ AHD_WIDE = 0x00001,/* Wide Channel */
AHD_AIC79XXB_SLOWCRC = 0x00002,/* SLOWCRC bit should be set */
AHD_MULTI_FUNC = 0x00100,/* Multi-Function/Channel Device */
AHD_TARGETMODE = 0x01000,/* Has tested target mode support */
* Target mode version of the shared data SCB segment.
*/
struct target_data {
- uint32_t spare[2];
+ uint32_t spare[2];
uint8_t scsi_status; /* SCSI status to give to initiator */
uint8_t target_phases; /* Bitmap of phases to execute */
uint8_t data_phase; /* Data-In or Data-Out */
struct ahd_softc *ahd_softc;
scb_flag flags;
struct scb_platform_data *platform_data;
- struct map_node *hscb_map;
- struct map_node *sg_map;
- struct map_node *sense_map;
+ struct map_node *hscb_map;
+ struct map_node *sg_map;
+ struct map_node *sense_map;
void *sg_list;
uint8_t *sense_data;
dma_addr_t sg_list_busaddr;
struct target_cmd {
uint8_t scsiid; /* Our ID and the initiator's ID */
uint8_t identify; /* Identify message */
- uint8_t bytes[22]; /*
+ uint8_t bytes[22]; /*
* Bytes contains any additional message
* bytes terminated by 0xFF. The remainder
* is the cdb to execute.
* structure here so we can store arrays of them, etc. in OS neutral
* data structures.
*/
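/*
 * Illustrative sketch (editorial, not part of this patch): per the
 * comment on struct target_cmd above, the bytes[] area carries any
 * additional message bytes terminated by 0xFF, with the CDB
 * following.  The helper name and signature below are hypothetical.
 */
static int
parse_target_cmd_bytes(const uint8_t *bytes, size_t len,
		       const uint8_t **cdb, size_t *cdb_len)
{
	size_t i;

	for (i = 0; i < len; i++) {
		if (bytes[i] == 0xFF) {
			/* Message bytes end here; the CDB follows. */
			*cdb = &bytes[i + 1];
			*cdb_len = len - (i + 1);
			return (0);
		}
	}
	return (-1);	/* no 0xFF terminator found */
}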
-#ifdef AHD_TARGET_MODE
+#ifdef AHD_TARGET_MODE
struct ahd_tmode_lstate {
struct cam_path *path;
struct ccb_hdr_slist accept_tios;
/***************************** Lookup Tables **********************************/
/*
* Phase -> name and message out response
- * to parity errors in each phase table.
+ * to parity errors in each phase table.
*/
struct ahd_phase_table_entry {
- uint8_t phase;
- uint8_t mesg_out; /* Message response to parity errors */
+ uint8_t phase;
+ uint8_t mesg_out; /* Message response to parity errors */
const char *phasemsg;
};
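/*
 * Sketch only (editorial, not part of this patch): a table of the
 * entries above is scanned by phase value, with the final entry
 * serving as the "unknown phase" default; ahd_lookup_phase_entry(),
 * used later in this file, behaves along these lines.  The function
 * and parameter names here are hypothetical.
 */
static const struct ahd_phase_table_entry *
lookup_phase(const struct ahd_phase_table_entry *table, int num_entries,
	     uint8_t phase)
{
	int i;

	for (i = 0; i < num_entries - 1; i++) {
		if (table[i].phase == phase)
			break;
	}
	/* Falls through to the last entry when no phase matches. */
	return (&table[i]);
}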
#define CFBS_ENABLED 0x04
#define CFBS_DISABLED_SCAN 0x08
#define CFENABLEDV 0x0010 /* Perform Domain Validation */
-#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
+#define CFCTRL_A 0x0020 /* BIOS displays Ctrl-A message */
#define CFSPARITY 0x0040 /* SCSI parity */
#define CFEXTEND 0x0080 /* extended translation enabled */
#define CFBOOTCD 0x0100 /* Support Bootable CD-ROM */
/*
* Host Adapter Control Bits
*/
- uint16_t adapter_control; /* word 17 */
+ uint16_t adapter_control; /* word 17 */
#define CFAUTOTERM 0x0001 /* Perform Auto termination */
#define CFSTERM 0x0002 /* SCSI low byte termination */
#define CFWSTERM 0x0004 /* SCSI high byte termination */
#define CFSEHIGHTERM 0x0020 /* Ultra2 secondary high term */
#define CFSTPWLEVEL 0x0040 /* Termination level control */
#define CFBIOSAUTOTERM 0x0080 /* Perform Auto termination */
-#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
+#define CFTERM_MENU 0x0100 /* BIOS displays termination menu */
#define CFCLUSTERENB 0x8000 /* Cluster Enable */
/*
/*
* Maximum targets
*/
- uint16_t max_targets; /* word 19 */
+ uint16_t max_targets; /* word 19 */
#define CFMAXTARG 0x00ff /* maximum targets */
#define CFBOOTLUN 0x0f00 /* Lun to boot from */
#define CFBOOTID 0xf000 /* Target to boot from */
#define FLX_ROMSTAT_EE_2MBx8 0x2
#define FLX_ROMSTAT_EE_4MBx8 0x3
#define FLX_ROMSTAT_EE_16MBx8 0x4
-#define CURSENSE_ENB 0x1
+#define CURSENSE_ENB 0x1
#define FLXADDR_FLEXSTAT 0x2
#define FLX_FSTAT_BUSY 0x1
#define FLXADDR_CURRENT_STAT 0x4
};
struct ahd_softc {
- bus_space_tag_t tags[2];
- bus_space_handle_t bshs[2];
+ bus_space_tag_t tags[2];
+ bus_space_handle_t bshs[2];
struct scb_data scb_data;
struct hardware_scb *next_queued_hscb;
u_int int_coalescing_threshold;
u_int int_coalescing_stop_threshold;
- uint16_t user_discenable;/* Disconnection allowed */
+ uint16_t user_discenable;/* Disconnection allowed */
uint16_t user_tagenable;/* Tagged Queuing allowed */
};
* Hardware error codes.
*/
struct ahd_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
u_int scsi_id, char channel, int force);
#endif
static void ahd_devlimited_syncrate(struct ahd_softc *ahd,
- struct ahd_initiator_tinfo *,
+ struct ahd_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
static u_int ahd_sglist_size(struct ahd_softc *ahd);
static u_int ahd_sglist_allocsize(struct ahd_softc *ahd);
static bus_dmamap_callback_t
- ahd_dmamap_cb;
+ ahd_dmamap_cb;
static void ahd_initialize_hscbs(struct ahd_softc *ahd);
static int ahd_init_scbdata(struct ahd_softc *ahd);
static void ahd_fini_scbdata(struct ahd_softc *ahd);
static void ahd_handle_hwerrint(struct ahd_softc *ahd);
static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat);
static void ahd_handle_scsiint(struct ahd_softc *ahd,
- u_int intstat);
+ u_int intstat);
/************************ Sequencer Execution Control *************************/
void
while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
u_int fifo_mode;
u_int i;
-
+
scbid = ahd_inw(ahd, GSFIFO);
scb = ahd_lookup_scb(ahd, scbid);
if (scb == NULL) {
while (!SCBID_IS_NULL(scbid)) {
uint8_t *hscb_ptr;
u_int i;
-
+
ahd_set_scbptr(ahd, scbid);
next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
scb = ahd_lookup_scb(ahd, scbid);
{
struct scb *scb;
u_int scb_index;
-
+
#ifdef AHD_DEBUG
if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
ahd->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahd->msgin_index = 0;
- }
- else
+ } else
ahd_setup_target_msgin(ahd,
&devinfo,
scb);
;
ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
- SCB_GET_CHANNEL(ahd, scb),
- SCB_GET_LUN(scb), SCB_GET_TAG(scb),
- ROLE_INITIATOR, /*status*/0,
+ SCB_GET_CHANNEL(ahd, scb),
+ SCB_GET_LUN(scb), SCB_GET_TAG(scb),
+ ROLE_INITIATOR, /*status*/0,
SEARCH_REMOVE);
}
break;
perrdiag = ahd_inb(ahd, PERRDIAG);
msg_out = MSG_INITIATOR_DET_ERR;
ahd_outb(ahd, CLRSINT1, CLRSCSIPERR);
-
+
/*
* Try to find the SCB associated with this error.
*/
silent = FALSE;
if (lqistat1 == 0
|| (lqistat1 & LQICRCI_NLQ) != 0) {
- if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
+ if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0)
ahd_set_active_fifo(ahd);
scbid = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scbid);
ahd_lookup_phase_entry(curphase)->phasemsg);
ahd_inb(ahd, SCSIDAT);
}
-
+
if (curphase == P_MESGIN)
msg_out = MSG_PARITY_ERROR;
}
cs = ahd->critical_sections;
for (i = 0; i < ahd->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
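/*
 * Sketch (editorial, not part of this patch): the loop above is the
 * entire critical-section test.  Factored out as a predicate it looks
 * roughly like this; the helper name is hypothetical and "struct cs"
 * is the {begin, end} pair type this driver uses for
 * ahd->critical_sections.
 */
static int
seqaddr_in_critical_section(const struct cs *cs_table, u_int num_cs,
			    u_int seqaddr)
{
	u_int i;

	for (i = 0; i < num_cs; i++) {
		if (cs_table[i].begin < seqaddr && cs_table[i].end >= seqaddr)
			return (1);
	}
	return (0);
}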
if (stepping == FALSE) {
first_instr = seqaddr;
- ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
- simode0 = ahd_inb(ahd, SIMODE0);
+ ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
+ simode0 = ahd_inb(ahd, SIMODE0);
simode3 = ahd_inb(ahd, SIMODE3);
lqimode0 = ahd_inb(ahd, LQIMODE0);
lqimode1 = ahd_inb(ahd, LQIMODE1);
ahd_outb(ahd, LQOMODE1, lqomode1);
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP);
- ahd_outb(ahd, SIMODE1, simode1);
+ ahd_outb(ahd, SIMODE1, simode1);
/*
* SCSIINT seems to glitch occasionally when
* the interrupt masks are restored. Clear SCSIINT
ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI
|CLRBUSFREE|CLRSCSIPERR|CLRREQINIT);
ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO
- |CLRIOERR|CLROVERRUN);
+ |CLRIOERR|CLROVERRUN);
ahd_outb(ahd, CLRINT, CLRSCSIINT);
}
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
&& *period > AHD_SYNCRATE_MIN_DT)
*ppr_options &= ~MSG_EXT_PPR_DT_REQ;
-
+
if (*period > AHD_SYNCRATE_MIN)
*period = 0;
ahd_outb(ahd, NEGOADDR, devinfo->target);
period = tinfo->period;
offset = tinfo->offset;
- memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
+ memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
|MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
con_opts = 0;
#endif
ahd_assert_atn(ahd);
}
- } else
+ } else
ahd->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
*/
return;
}
-
+
ahd->msgin_index++;
/*
u_int ppr_options;
u_int offset;
u_int saved_offset;
-
+
if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
reject = TRUE;
break;
*/
ahd_outb(ahd, SCB_CONTROL,
ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
- scb->hscb->control &= mask;
+ scb->hscb->control &= mask;
ahd_set_transaction_tag(scb, /*enabled*/FALSE,
/*type*/MSG_SIMPLE_TASK);
ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);
-
+
scb_index = ahd_get_scbptr(ahd);
scb = ahd_lookup_scb(ahd, scb_index);
ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
/*ppr_options*/0, AHD_TRANS_CUR,
/*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahd_send_async(ahd, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahd->msgout_index = 0;
ahd->msgout_len = 0;
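/*
 * Sketch (editorial, not part of this patch) of the convention the
 * comment above describes: each message-construction routine appends
 * to msgout_buf and bumps the index/len counters so several messages
 * can be chained in one message-out phase.  The helper name is
 * hypothetical; the driver's SDTR/WDTR/PPR builders follow the same
 * pattern.
 */
static void
append_message_byte(struct ahd_softc *ahd, uint8_t byte)
{
	ahd->msgout_buf[ahd->msgout_index++] = byte;
	ahd->msgout_len++;
}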
{
ahd->unpause = 0;
- ahd->pause = PAUSE;
+ ahd->pause = PAUSE;
return (0);
}
u_int sxfrctl1;
int wait;
uint32_t cmd;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
static const char *channel_strings[] = {
"Primary Low",
"Primary High",
- "Secondary Low",
+ "Secondary Low",
"Secondary High"
};
} else {
sxfrctl1 |= ahd->seltime;
}
-
+
ahd_outb(ahd, SXFRCTL0, DFON);
ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN);
ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR);
ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);
-
+
/* Tell the sequencer of our initial queue positions */
ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
ahd_resume(struct ahd_softc *ahd)
{
ahd_reset(ahd, /*reinit*/TRUE);
- ahd_intr_enable(ahd, TRUE);
+ ahd_intr_enable(ahd, TRUE);
ahd_restart(ahd);
}
u_int scbid;
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
scbid = ahd_inw_scbram(ahd, scb_offset);
ahd_set_scbptr(ahd, saved_scbptr);
{
u_int scb_offset;
u_int saved_scbptr;
-
+
scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
ahd_outw(ahd, scb_offset, scbid);
ahd_set_scbptr(ahd, saved_scbptr);
target = SCB_GET_TARGET(ahd, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahd, scb);
-
+
ahd_search_qinfifo(ahd, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
} else {
prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
- ahd_sync_scb(ahd, prev_scb,
+ ahd_sync_scb(ahd, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
int lun, u_int tag, role_t role, uint32_t status,
- ahd_search_action action, u_int *list_head,
+ ahd_search_action action, u_int *list_head,
u_int *list_tail, u_int tid)
{
struct scb *scb;
struct ahd_softc *ahd = from_timer(ahd, t, stat_timer);
u_long s;
int enint_coal;
-
+
ahd_lock(ahd, &s);
enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
* operations are on data structures that the sequencer
* is not touching once the queue is frozen.
*/
- hscb = scb->hscb;
+ hscb = scb->hscb;
if (ahd_is_paused(ahd)) {
paused = 1;
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK;
sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK);
/*
* Setup downloadable constant table.
- *
+ *
* The computation for the S/G prefetch variables is
* a bit complicated. We would like to always fetch
* in terms of cachelined sized increments. However,
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
+
break;
}
if (entry >= num_entries)
ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
"%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
- ahd_name(ahd),
+ ahd_name(ahd),
ahd_inw(ahd, CURADDR),
ahd_build_mode_state(ahd, ahd->saved_src_mode,
ahd->saved_dst_mode));
}
printk("\n");
-
printk("Sequencer DMA-Up and Complete list: ");
scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
i = 0;
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART);
-
+
error = ahd_wait_seeprom(ahd);
if (error)
break;
ahd_outw(ahd, SEEDAT, *buf++);
ahd_outb(ahd, SEEADR, cur_addr);
ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART);
-
+
retval = ahd_wait_seeprom(ahd);
if (retval)
break;
error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype);
if (error != 0
- || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
+ || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE))
return (0);
return (1);
#endif
our_id = ahd->our_id;
if (ccb->ccb_h.target_id != our_id) {
if ((ahd->features & AHD_MULTI_TID) != 0
- && (ahd->flags & AHD_INITIATORROLE) != 0) {
+ && (ahd->flags & AHD_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
}
ahd_lock(ahd, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHD_NUM_TARGETS];
+ struct scsi_target *starget[AHD_NUM_TARGETS];
spinlock_t spin_lock;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHD_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
ahd_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahd_platform_set_tags(struct ahd_softc *ahd, struct scsi_device *sdev,
/* Define the macro locally since it's different for different class of chips.
*/
-#define ID(x) \
- ID2C(x), \
+#define ID(x) \
+ ID2C(x), \
ID2C(IDIROC(x))
static const struct pci_device_id ahd_linux_pci_id_table[] = {
IRQF_SHARED, "aic79xx", ahd);
if (!error)
ahd->platform_data->irq = ahd->dev_softc->irq;
-
+
return (-error);
}
seq_puts(m, "Renegotiation Pending\n");
return;
}
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahd_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
u_int start_addr;
if (ahd->seep_config == NULL) {
- ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
+ ahd->seep_config = kmalloc(sizeof(*ahd->seep_config),
+ GFP_ATOMIC);
if (ahd->seep_config == NULL) {
printk("aic79xx: Unable to allocate serial "
"eeprom buffer. Write failing\n");
* add other 93Cx6 functions.
*/
struct seeprom_cmd {
- uint8_t len;
- uint8_t bits[11];
+ uint8_t len;
+ uint8_t bits[11];
};
/* Short opcodes for the c46 */
* Hardware error codes.
*/
struct ahc_hard_error_entry {
- uint8_t errno;
+ uint8_t errno;
const char *errmesg;
};
#endif
static const struct ahc_syncrate*
ahc_devlimited_syncrate(struct ahc_softc *ahc,
- struct ahc_initiator_tinfo *,
+ struct ahc_initiator_tinfo *,
u_int *period,
u_int *ppr_options,
role_t role);
struct scb *scb);
#endif
-static bus_dmamap_callback_t ahc_dmamap_cb;
+static bus_dmamap_callback_t ahc_dmamap_cb;
static void ahc_build_free_scb_list(struct ahc_softc *ahc);
static int ahc_init_scbdata(struct ahc_softc *ahc);
static void ahc_fini_scbdata(struct ahc_softc *ahc);
{
struct scb *scb;
struct ahc_devinfo devinfo;
-
+
ahc_fetch_devinfo(ahc, &devinfo);
/*
goto unpause;
}
- hscb = scb->hscb;
+ hscb = scb->hscb;
/* Don't want to clobber the original sense code */
if ((scb->flags & SCB_SENSE) != 0) {
&tstate);
tinfo = &targ_info->curr;
sg = scb->sg_list;
- sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
+ sc = (struct scsi_sense *)(&hscb->shared_data.cdb);
/*
* Save off the residual if there is one.
*/
* errors will be reported before any data
* phases occur.
*/
- if (ahc_get_residual(scb)
- == ahc_get_transfer_length(scb)) {
+ if (ahc_get_residual(scb)
+ == ahc_get_transfer_length(scb)) {
ahc_update_neg_request(ahc, &devinfo,
tstate, targ_info,
AHC_NEG_IF_NON_ASYNC);
scb->flags |= SCB_AUTO_NEGOTIATE;
}
hscb->cdb_len = sizeof(*sc);
- hscb->dataptr = sg->addr;
+ hscb->dataptr = sg->addr;
hscb->datacnt = sg->len;
hscb->sgptr = scb->sg_list_phys | SG_FULL_RESID;
hscb->sgptr = ahc_htole32(hscb->sgptr);
ahc_assert_atn(ahc);
break;
}
- case SEND_REJECT:
+ case SEND_REJECT:
{
u_int rejbyte = ahc_inb(ahc, ACCUM);
printk("%s:%c:%d: Warning - unknown message received from "
- "target (0x%x). Rejecting\n",
+ "target (0x%x). Rejecting\n",
ahc_name(ahc), devinfo.channel, devinfo.target, rejbyte);
- break;
+ break;
}
case PROTO_VIOLATION:
{
ahc->msg_type =
MSG_TYPE_TARGET_MSGOUT;
ahc->msgin_index = 0;
- }
- else
+ } else
ahc_setup_target_msgin(ahc,
&devinfo,
scb);
if (scb != NULL)
ahc_set_transaction_status(scb,
CAM_UNCOR_PARITY);
- ahc_reset_channel(ahc, devinfo.channel,
+ ahc_reset_channel(ahc, devinfo.channel,
/*init reset*/TRUE);
}
} else {
printk("data overrun detected %s."
" Tag == 0x%x.\n",
ahc_phase_table[i].phasemsg,
- scb->hscb->tag);
+ scb->hscb->tag);
ahc_print_path(ahc, scb);
printk("%s seen Data Phase. Length = %ld. NumSGs = %d.\n",
ahc_inb(ahc, SEQ_FLAGS) & DPHASE ? "Have" : "Haven't",
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len)
& AHC_SG_LEN_MASK);
if (status == 0 && status0 == 0) {
if ((ahc->features & AHC_TWIN) != 0) {
/* Try the other channel */
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
status = ahc_inb(ahc, SSTAT1)
& (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
intr_channel = (cur_channel == 'A') ? 'B' : 'A';
printk("%s: Someone reset channel %c\n",
ahc_name(ahc), intr_channel);
if (intr_channel != cur_channel)
- ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
+ ahc_outb(ahc, SBLKCTL, ahc_inb(ahc, SBLKCTL) ^ SELBUSB);
ahc_reset_channel(ahc, intr_channel, /*Initiate Reset*/FALSE);
} else if ((status & SCSIPERR) != 0) {
/*
}
/*
- * We've set the hardware to assert ATN if we
- * get a parity error on "in" phases, so all we
+ * We've set the hardware to assert ATN if we
+ * get a parity error on "in" phases, so all we
* need to do is stuff the message buffer with
* the appropriate message. "In" phases have set
* mesg_out to something other than MSG_NOP.
| (ahc_inb(ahc, SEQADDR1) << 8);
/*
- * Seqaddr represents the next instruction to execute,
+ * Seqaddr represents the next instruction to execute,
* so we are really executing the instruction just
* before it.
*/
seqaddr -= 1;
cs = ahc->critical_sections;
for (i = 0; i < ahc->num_critical_sections; i++, cs++) {
-
if (cs->begin < seqaddr && cs->end >= seqaddr)
break;
}
CLRREQINIT);
ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO);
- ahc_flush_device_writes(ahc);
+ ahc_flush_device_writes(ahc);
ahc_outb(ahc, CLRINT, CLRSCSIINT);
ahc_flush_device_writes(ahc);
}
printk("sg[%d] - Addr 0x%x%x : Length %d\n",
i,
(ahc_le32toh(scb->sg_list[i].len) >> 24
- & SG_HIGH_ADDR_BITS),
+ & SG_HIGH_ADDR_BITS),
ahc_le32toh(scb->sg_list[i].addr),
ahc_le32toh(scb->sg_list[i].len));
}
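/*
 * Sketch (editorial, not part of this patch): the printout above
 * reflects how this driver packs S/G entries, with the low 32 address
 * bits in sg->addr and the high address bits sharing the top byte of
 * sg->len with the 24-bit length.  Roughly, for a bus address
 * 'busaddr' and byte count 'len':
 *
 *	sg->addr = ahc_htole32(busaddr & 0xFFFFFFFF);
 *	sg->len  = ahc_htole32((len & AHC_SG_LEN_MASK)
 *			     | ((busaddr >> 8) & SG_HIGH_ADDR_BITS));
 */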
*/
if (role == ROLE_TARGET)
transinfo = &tinfo->user;
- else
+ else
transinfo = &tinfo->goal;
*ppr_options &= transinfo->ppr_options;
if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
{
struct scsi_device *sdev = cmd->device;
- ahc_platform_set_tags(ahc, sdev, devinfo, alg);
- ahc_send_async(ahc, devinfo->channel, devinfo->target,
- devinfo->lun, AC_TRANSFER_NEG);
+ ahc_platform_set_tags(ahc, sdev, devinfo, alg);
+ ahc_send_async(ahc, devinfo->channel, devinfo->target,
+ devinfo->lun, AC_TRANSFER_NEG);
}
/*
role = ROLE_INITIATOR;
if (role == ROLE_TARGET
- && (ahc->features & AHC_MULTI_TID) != 0
- && (ahc_inb(ahc, SEQ_FLAGS)
- & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
+ && (ahc->features & AHC_MULTI_TID) != 0
+ && (ahc_inb(ahc, SEQ_FLAGS)
+ & (CMDPHASE_PENDING|TARG_CMD_PENDING|NO_DISCONNECT)) != 0) {
/* We were selected, so pull our id from TARGIDIN */
our_id = ahc_inb(ahc, TARGIDIN) & OID;
} else if ((ahc->features & AHC_ULTRA2) != 0)
#endif
ahc_assert_atn(ahc);
}
- } else
+ } else
ahc->msgin_index++;
if (message_done == MSGLOOP_TERMINATED) {
*/
return;
}
-
+
ahc->msgin_index++;
/*
ahc_set_syncrate(ahc, devinfo, /*syncrate*/NULL,
/*period*/0, /*offset*/0, /*ppr_options*/0,
AHC_TRANS_CUR, /*paused*/TRUE);
-
+
if (status != CAM_SEL_TIMEOUT)
ahc_send_async(ahc, devinfo->channel, devinfo->target,
CAM_LUN_WILDCARD, AC_SENT_BDR);
struct scb *scb)
{
- /*
+ /*
* To facilitate adding multiple messages together,
* each routine should increment the index and len
* variables instead of setting them explicitly.
- */
+ */
ahc->msgout_index = 0;
ahc->msgout_len = 0;
ahc->unpause = ahc_inb(ahc, HCNTRL) & IRQMS;
else
ahc->unpause = 0;
- ahc->pause = ahc->unpause | PAUSE;
+ ahc->pause = ahc->unpause | PAUSE;
/* XXX The shared scb data stuff should be deprecated */
if (ahc->scb_data == NULL) {
ahc->scb_data = kzalloc(sizeof(*ahc->scb_data), GFP_ATOMIC);
u_int sxfrctl1_a, sxfrctl1_b;
int error;
int wait;
-
+
/*
* Preserve the value of the SXFRCTL1 register for all channels.
* It contains settings that affect termination and we don't want
*/
error = ahc->bus_chip_init(ahc);
#ifdef AHC_DUMP_SEQ
- else
+ else
ahc_dumpseq(ahc);
#endif
/* Set the next pointer */
if ((ahc->flags & AHC_PAGESCBS) != 0)
ahc_outb(ahc, SCB_NEXT, i+1);
- else
+ else
ahc_outb(ahc, SCB_NEXT, SCB_LIST_NULL);
/* Make the tag number, SCSIID, and lun invalid */
/*
* Note that we were successful
*/
- return (0);
+ return (0);
error_exit:
len = sprintf(buf, "%s: ", ahc_chip_names[ahc->chip & AHC_CHIPID_MASK]);
buf += len;
if ((ahc->features & AHC_TWIN) != 0)
- len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
+ len = sprintf(buf, "Twin Channel, A SCSI Id=%d, "
"B SCSI Id=%d, primary %c, ",
ahc->our_id, ahc->our_id_b,
(ahc->flags & AHC_PRIMARY_CHANNEL) + 'A');
ahc_outb(ahc, CMDSIZE_TABLE + 5, 11);
ahc_outb(ahc, CMDSIZE_TABLE + 6, 0);
ahc_outb(ahc, CMDSIZE_TABLE + 7, 0);
-
+
if ((ahc->features & AHC_HS_MAILBOX) != 0)
ahc_outb(ahc, HS_MAILBOX, 0);
*/
if ((ahc->flags & AHC_USEDEFAULTS) != 0)
ahc->our_id = ahc->our_id_b = 7;
-
+
/*
* Default to allowing initiator operations.
*/
* DMA tag for our command fifos and other data in system memory
* the card's sequencer must be able to access. For initiator
* roles, we need to allocate space for the qinfifo and qoutfifo.
- * The qinfifo and qoutfifo are composed of 256 1 byte elements.
+ * The qinfifo and qoutfifo are composed of 256 1 byte elements.
* When providing for the target mode role, we must additionally
* provide space for the incoming target command fifo and an extra
* byte to deal with a dma bug in some chip versions.
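/*
 * Sketch of the sizing described above (editorial, not part of this
 * patch): the exact constants are assumptions here, but the shape of
 * the computation is
 *
 *	driver_data_size = 2 * 256 * sizeof(uint8_t);	// qinfifo + qoutfifo
 *	if ((ahc->features & AHC_TARGETMODE) != 0)
 *		driver_data_size += AHC_TMODE_CMDS * sizeof(struct target_cmd)
 *				  + 1;			// DMA bug workaround byte
 */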
&& (ahc->flags & AHC_INITIATORROLE) != 0)
ahc->flags |= AHC_RESET_BUS_A;
- ultraenb = 0;
+ ultraenb = 0;
tagenable = ALL_TARGETS_MASK;
/* Grab the disconnection disable table and invert it for our needs */
&& (ultraenb & mask) != 0) {
/* Treat 10MHz as a non-ultra speed */
scsirate &= ~SXFR;
- ultraenb &= ~mask;
+ ultraenb &= ~mask;
}
- tinfo->user.period =
+ tinfo->user.period =
ahc_find_period(ahc, scsirate,
(ultraenb & mask)
? AHC_SYNCRATE_ULTRA
{
ahc_reset(ahc, /*reinit*/TRUE);
- ahc_intr_enable(ahc, TRUE);
+ ahc_intr_enable(ahc, TRUE);
ahc_restart(ahc);
return (0);
}
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
scbid = ahc_inb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl));
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT+TCL_TARGET_OFFSET(tcl), SCB_LIST_NULL);
if ((ahc->flags & AHC_SCB_BTT) != 0) {
u_int saved_scbptr;
-
+
saved_scbptr = ahc_inb(ahc, SCBPTR);
ahc_outb(ahc, SCBPTR, TCL_LUN(tcl));
ahc_outb(ahc, SCB_64_BTT + TCL_TARGET_OFFSET(tcl), scbid);
target = SCB_GET_TARGET(ahc, scb);
lun = SCB_GET_LUN(scb);
channel = SCB_GET_CHANNEL(ahc, scb);
-
+
ahc_search_qinfifo(ahc, target, channel, lun,
/*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
CAM_REQUEUE_REQ, SEARCH_COMPLETE);
ahc_outb(ahc, NEXT_QUEUED_SCB, scb->hscb->tag);
} else {
prev_scb->hscb->next = scb->hscb->tag;
- ahc_sync_scb(ahc, prev_scb,
+ ahc_sync_scb(ahc, prev_scb,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}
ahc->qinfifo[ahc->qinfifonext++] = scb->hscb->tag;
break;
}
} else {
-
prev = next;
next = ahc_inb(ahc, SCB_NEXT);
}
/* update the waiting list */
if (prev == SCB_LIST_NULL) {
/* First in the list */
- ahc_outb(ahc, WAITING_SCBH, next);
+ ahc_outb(ahc, WAITING_SCBH, next);
/*
* Ensure we aren't attempting to perform
ahc_outb(ahc, SCSISEQ, (ahc_inb(ahc, SCSISEQ) & ~ENSELO));
} else {
/*
- * Select the scb that pointed to us
+ * Select the scb that pointed to us
* and update its next pointer.
*/
ahc_outb(ahc, SCBPTR, prev);
/*
* Remainder of the SG where the transfer
- * stopped.
+ * stopped.
*/
resid = ahc_le32toh(spkt->residual_datacnt) & AHC_SG_LEN_MASK;
sg = ahc_sg_bus_to_virt(scb, resid_sgptr & SG_PTR_MASK);
if (begin_set[cs_count] == TRUE
&& end_set[cs_count] == FALSE) {
cs_table[cs_count].end = downloaded;
- end_set[cs_count] = TRUE;
+ end_set[cs_count] = TRUE;
cs_count++;
}
continue;
printed_mask == 0 ? ":(" : "|",
table[entry].name);
printed_mask |= table[entry].mask;
-
break;
}
if (entry >= num_entries)
scb_index = ahc_inb(ahc, SCB_NEXT);
}
printk("\n");
-
+
ahc_sync_qoutfifo(ahc, BUS_DMASYNC_POSTREAD);
printk("QOUTFIFO entries: ");
qoutpos = ahc->qoutfifonext;
if ((ahc->features & AHC_MULTIROLE) != 0) {
if ((ahc->features & AHC_MULTI_TID) != 0
- && (ahc->flags & AHC_INITIATORROLE) != 0) {
+ && (ahc->flags & AHC_INITIATORROLE) != 0) {
/*
* Only allow additional targets if
* the initiator role is disabled.
targid_mask |= target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1, (targid_mask >> 8));
-
ahc_update_scsiid(ahc, targid_mask);
} else {
u_int our_id;
}
ahc_lock(ahc, &s);
-
+
ccb->ccb_h.status = CAM_REQ_CMP;
LIST_FOREACH(scb, &ahc->pending_scbs, pending_links) {
struct ccb_hdr *ccbh;
targid_mask &= ~target_mask;
ahc_outb(ahc, TARGID, targid_mask);
ahc_outb(ahc, TARGID+1,
- (targid_mask >> 8));
+ (targid_mask >> 8));
ahc_update_scsiid(ahc, targid_mask);
}
}
ahc_outb(ahc, HS_MAILBOX, hs_mailbox);
} else {
if (!paused)
- ahc_pause(ahc);
+ ahc_pause(ahc);
ahc_outb(ahc, KERNEL_TQINPOS,
ahc->tqinfifonext & HOST_TQINPOS);
if (!paused)
printk("Reserved or VU command code type encountered\n");
break;
}
-
+
memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len);
atio->ccb_h.status |= CAM_CDB_RECVD;
static void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
static int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
- struct ahc_dma_seg *sg,
+ struct ahc_dma_seg *sg,
dma_addr_t addr, bus_size_t len);
static void
target_offset = starget->id;
if (starget->channel != 0)
target_offset += 8;
-
+
if (starget->channel)
our_id = ahc->our_id_b;
ultra = 0;
flags &= ~CFXFER;
}
-
+
if ((ahc->features & AHC_ULTRA2) != 0) {
scsirate = (flags & CFXFER) | (ultra ? 0x8 : 0);
} else {
scsirate = (flags & CFXFER) << 4;
- maxsync = ultra ? AHC_SYNCRATE_ULTRA :
+ maxsync = ultra ? AHC_SYNCRATE_ULTRA :
AHC_SYNCRATE_FAST;
}
spi_max_width(starget) = (flags & CFWIDEB) ? 1 : 0;
if (!(flags & CFSYNCH))
spi_max_offset(starget) = 0;
- spi_min_period(starget) =
+ spi_min_period(starget) =
ahc_find_period(ahc, scsirate, maxsync);
}
ahc_compile_devinfo(&devinfo, our_id, starget->id,
* a tagged queuing capable device.
*/
dev->maxtags = 0;
-
+
spi_period(starget) = 0;
return 0;
starget = ahc->platform_data->starget[i];
if (starget != NULL) {
ahc->platform_data->starget[i] = NULL;
- }
- }
+ }
+ }
if (ahc->platform_data->irq != AHC_LINUX_NOIRQ)
free_irq(ahc->platform_data->irq, ahc);
default:
case AHC_QUEUE_NONE:
now_queuing = 0;
- break;
+ break;
case AHC_QUEUE_BASIC:
now_queuing = AHC_DEV_Q_BASIC;
break;
hscb->scsioffset = tinfo->curr.offset;
if ((tstate->ultraenb & mask) != 0)
hscb->control |= ULTRAENB;
-
+
if ((ahc->user_discenable & mask) != 0)
hscb->control |= DISCENB;
-
+
if ((tstate->auto_negotiate & mask) != 0) {
scb->flags |= SCB_AUTO_NEGOTIATE;
scb->hscb->control |= MK_MESSAGE;
*/
scb->hscb->sgptr =
ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
-
+
/*
* Copy the first SG into the "current"
* data pointer area.
dev->commands_issued++;
if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
dev->commands_since_idle_or_otag++;
-
+
scb->flags |= SCB_ACTIVE;
if (untagged_q) {
TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
int ours;
ahc = (struct ahc_softc *) dev_id;
- ahc_lock(ahc, &flags);
+ ahc_lock(ahc, &flags);
ours = ahc_intr(ahc);
ahc_unlock(ahc, &flags);
return IRQ_RETVAL(ours);
spi_display_xfer_agreement(starget);
break;
}
- case AC_SENT_BDR:
+ case AC_SENT_BDR:
{
WARN_ON(lun != CAM_LUN_WILDCARD);
scsi_report_device_reset(ahc->platform_data->host,
channel - 'A', target);
break;
}
- case AC_BUS_RESET:
+ case AC_BUS_RESET:
if (ahc->platform_data->host != NULL) {
scsi_report_bus_reset(ahc->platform_data->host,
channel - 'A');
}
- break;
- default:
- panic("ahc_send_async: Unexpected async event");
- }
+ break;
+ default:
+ panic("ahc_send_async: Unexpected async event");
+ }
}
/*
sdev->sdev_target->id, sdev->lun,
sdev->sdev_target->channel == 0 ? 'A' : 'B',
ROLE_INITIATOR);
-
+
/*
* We don't currently trust the mid-layer to
* properly deal with queue full or busy. So,
/* Any SCB for this device will do for a target reset */
LIST_FOREACH(pending_scb, &ahc->pending_scbs, pending_links) {
- if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
+ if (ahc_match_scb(ahc, pending_scb, scmd_id(cmd),
scmd_channel(cmd) + 'A',
CAM_LUN_WILDCARD,
SCB_LIST_NULL, ROLE_INITIATOR))
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
ppr_options &= MSG_EXT_PPR_QAS_REQ;
}
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
if (offset != 0) {
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
period = tinfo->goal.period;
ppr_options = tinfo->goal.ppr_options;
}
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
struct ahc_softc *ahc = *((struct ahc_softc **)shost->hostdata);
struct ahc_tmode_tstate *tstate;
- struct ahc_initiator_tinfo *tinfo
+ struct ahc_initiator_tinfo *tinfo
= ahc_fetch_transinfo(ahc,
starget->channel + 'A',
shost->this_id, starget->id, &tstate);
ahc_compile_devinfo(&devinfo, shost->this_id, starget->id, 0,
starget->channel + 'A', ROLE_INITIATOR);
- syncrate = ahc_find_syncrate(ahc, &period, &ppr_options, AHC_SYNCRATE_DT);
+ syncrate = ahc_find_syncrate(ahc, &period, &ppr_options,
+ AHC_SYNCRATE_DT);
ahc_lock(ahc, &flags);
ahc_set_syncrate(ahc, &devinfo, syncrate, period, tinfo->goal.offset,
ppr_options, AHC_TRANS_GOAL, FALSE);
if (!(ahc->features & AHC_ULTRA2)) {
/* non-LVD chipset, may not have SBLKCTL reg */
- spi_signalling(shost) =
+ spi_signalling(shost) =
ahc->features & AHC_HVD ?
SPI_SIGNAL_HVD :
SPI_SIGNAL_SE;
int active;
/*
- * The currently allowed number of
+ * The currently allowed number of
* transactions that can be queued to
* the device. Must be signed for
* conversion from tagged to untagged
* device's queue is halted.
*/
u_int qfrozen;
-
+
/*
* Cumulative command counter.
*/
/*
* Fields accessed from interrupt context.
*/
- struct scsi_target *starget[AHC_NUM_TARGETS];
+ struct scsi_target *starget[AHC_NUM_TARGETS];
spinlock_t spin_lock;
u_int qfrozen;
struct completion *eh_done;
- struct Scsi_Host *host; /* pointer to scsi host */
+ struct Scsi_Host *host; /* pointer to scsi host */
#define AHC_LINUX_NOIRQ ((uint32_t)~0)
uint32_t irq; /* IRQ for this adapter */
uint32_t bios_address;
- resource_size_t mem_busaddr; /* Mem Base Addr */
+ resource_size_t mem_busaddr; /* Mem Base Addr */
};
void ahc_delay(long);
ahc_freeze_scb(struct scb *scb)
{
if ((scb->io_ctx->result & (CAM_DEV_QFRZN << 16)) == 0) {
- scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
- scb->platform_data->dev->qfrozen++;
- }
+ scb->io_ctx->result |= CAM_DEV_QFRZN << 16;
+ scb->platform_data->dev->qfrozen++;
+ }
}
void ahc_platform_set_tags(struct ahc_softc *ahc, struct scsi_device *sdev,
u_int freq;
u_int mb;
- speed = 3300;
- freq = 0;
+ speed = 3300;
+ freq = 0;
if (tinfo->offset != 0) {
freq = ahc_calc_syncsrate(tinfo->period);
speed = freq;
}
speed *= (0x01 << tinfo->width);
- mb = speed / 1000;
- if (mb > 0)
+ mb = speed / 1000;
+ if (mb > 0)
seq_printf(m, "%d.%03dMB/s transfers", mb, speed % 1000);
- else
+ else
seq_printf(m, "%dKB/s transfers", speed);
if (freq != 0) {
if ((ahc->chip & AHC_VL) != 0) {
sd.sd_control_offset = SEECTL_2840;
sd.sd_status_offset = STATUS_2840;
- sd.sd_dataout_offset = STATUS_2840;
+ sd.sd_dataout_offset = STATUS_2840;
sd.sd_chip = C46;
sd.sd_MS = 0;
sd.sd_RDY = EEPROM_TF;
u_int start_addr;
if (ahc->seep_config == NULL) {
- ahc->seep_config = kmalloc(sizeof(*ahc->seep_config), GFP_ATOMIC);
+ ahc->seep_config = kmalloc(sizeof(*ahc->seep_config),
+ GFP_ATOMIC);
if (ahc->seep_config == NULL) {
printk("aic7xxx: Unable to allocate serial "
"eeprom buffer. Write failing\n");