.packageName <- "survival"   # package name marker (pre-namespace R convention)
#SCCS @(#)Surv.s	5.5 07/09/00
# Package up survival type data as a structure
#
# Surv: build a survival-data object, a 2- or 3-column numeric matrix
# ("time"/"status", or start/stop/status) carrying a "type" attribute that
# records the censoring type.  When 'type' is omitted it is inferred from
# the number of arguments: 1 or 2 -> 'right', 3 -> 'counting'.
#   time   - follow-up time (or start time / lower interval endpoint)
#   time2  - status, stop time, or upper endpoint, depending on type
#   event  - status indicator, used by the counting and interval types
#   origin - subtracted from both time scales for counting-process data
Surv <- function(time, time2, event,
	      type=c('right', 'left', 'interval', 'counting', 'interval2'),
		       origin=0) {
    nn <- length(time)
    ng <- nargs()
    if (missing(type)) {
	if (ng==1 || ng==2) type <- 'right'
	else if (ng==3)     type <- 'counting'
	else stop("Invalid number of arguments")
	}
    else {
	type <- match.arg(type)
	ng <- ng-1	# the 'type' argument itself doesn't count
	if (ng!=3 && (type=='interval' || type =='counting'))
		stop("Wrong number of args for this type of survival data")
	if (ng!=2 && (type=='right' || type=='left' ||  type=='interval2'))
		stop("Wrong number of args for this type of survival data")
	}
    who <- !is.na(time)

    if (ng==1) {
	# Time only: every observation is treated as an event
	if (!is.numeric(time)) stop ("Time variable is not numeric")
	ss <- cbind(time, 1)
	dimnames(ss) <- list(NULL, c("time", "status"))
	}
    else if (type=='right' || type=='left') {
	# (time, status): status may be logical, 0/1 coded, or 1/2 coded;
	# 1/2 coding is detected via max(status)==2 and shifted down by one
	if (!is.numeric(time)) stop ("Time variable is not numeric")
	if (length(time2) != nn) stop ("Time and status are different lengths")
	if (is.logical(time2)) status <- 1*time2
	    else  if (is.numeric(time2)) {
		who2 <- !is.na(time2)
		if (max(time2[who2]) ==2) status <- time2 -1
		else status <- time2
		if (any(status[who2] !=0  & status[who2]!=1))
				stop ("Invalid status value")
		}
	    else stop("Invalid status value")
	 ss <- cbind(time, status)
	 dimnames(ss) <- list(NULL, c("time", "status"))
	}
    else  if (type=='counting') {
	# (start, stop, status): intervals must be strictly increasing;
	# 'origin' is subtracted from both endpoints
	if (length(time2) !=nn) stop ("Start and stop are different lengths")
	if (length(event)!=nn) stop ("Start and event are different lengths")
	if (!is.numeric(time))stop("Start time is not numeric")
	if (!is.numeric(time2)) stop("Stop time is not numeric")
	who3 <- who & !is.na(time2)
	if (any (time[who3]>= time2[who3]))stop("Stop time must be > start time")
	if (is.logical(event)) status <- 1*event
	    else  if (is.numeric(event)) {
		who2 <- !is.na(event)
		if (max(event[who2])==2) status <- event - 1
		else status <- event
		if (any(status[who2] !=0  & status[who2]!=1))
				stop("Invalid status value")
		}
	    else stop("Invalid status value")
	ss <- cbind(time-origin, time2-origin, status)
	}

    else {  #interval censored data
	# Status codes: 0=right censored, 1=exact event, 2=left censored,
	# 3=interval censored.  'interval2' input encodes the censoring via
	# NAs and is converted to the 'interval' representation here.
	if (type=='interval2') {
	    event <- ifelse(is.na(time), 2,
		     ifelse(is.na(time2),0,
		     ifelse(time==time2, 1,3)))
	    if (any(time[event==3] > time2[event==3]))
		stop("Invalid interval: start > stop")
	    time <- ifelse(event!=2, time, time2)
	    type <- 'interval'
	    }
	else {
	    temp <- event[!is.na(event)]
	    if (!is.numeric(temp)) stop("Status indicator must be numeric")
	    if (length(temp)>0 && any(temp!= floor(temp) | temp<0 | temp>3))
		stop("Status indicator must be 0, 1, 2 or 3")
	    }
	status <- event
	# column 2 holds the upper endpoint for interval-censored rows and
	# a placeholder 1 otherwise
	ss <- cbind(time, ifelse(!is.na(event) & event==3, time2, 1),
			    status)
	}

    attr(ss, "type")  <- type
    class(ss) <- 'Surv'
    ss
    }

# Print method for Surv objects: show the formatted character form
# (e.g. "10+") without quotes; the printed value is returned invisibly.
print.Surv <- function(x, quote=FALSE, ...)
    invisible(print(as.character.Surv(x), quote=quote, ...))

as.character.Surv <- function(xx) {
    ## Format a Surv object as one readable character string per row.
    ## "+" marks right censoring, "<" left censoring, "?" a missing status;
    ## counting-process data is shown as "(start,stop]" and interval-
    ## censored data as "[lower, upper]".
    class(xx) <- NULL
    type <- attr(xx, 'type')
    if (type == 'right') {
        stat <- xx[, 2]
        flag <- ifelse(is.na(stat), "?", ifelse(stat == 0, "+", " "))
        return(paste(format(xx[, 1]), flag, sep = ''))
    }
    if (type == 'counting') {
        stat <- xx[, 3]
        flag <- ifelse(is.na(stat), "?", ifelse(stat == 0, "+", " "))
        return(paste('(', format(xx[, 1]), ',', format(xx[, 2]), flag,
                     ']', sep = ''))
    }
    if (type == 'left') {
        stat <- xx[, 2]
        flag <- ifelse(is.na(stat), "?", ifelse(stat == 0, "<", " "))
        return(paste(flag, format(xx[, 1]), sep = ''))
    }
    ## interval type: status 0=right censored, 1=event, 2=left censored,
    ## 3=interval censored
    stat <- xx[, 3]
    suffix <- c("+", "", "-", "]")[stat + 1]
    shown <- ifelse(stat == 3,
                    paste("[", format(xx[, 1]), ", ", format(xx[, 2]), sep = ''),
                    format(xx[, 1]))
    ifelse(is.na(stat), as.character(NA), paste(shown, suffix, sep = ''))
}

## the ... handling here works in R1.2 but not R1.1.1
##
##"[.Surv" <- function(x, ..., drop=F) {
##    # If only 1 subscript is given, the result will still be a Surv object
##    #  If the second is given extract the relevant columns as a matrix
##    if (missing(..2)) {
##	temp <- class(x)
##	type <- attr(x, "type")
##	class(x) <- NULL
##	x <- x[..., drop=F]
##	class(x) <- temp
##	attr(x, "type") <- type
##	x
##	}
##    else {
##	class(x) <- NULL
##	NextMethod("[")
##	}
##    }

"[.Surv" <- function(x, i, j, drop=FALSE) {
    ## Subset a Surv object.  With a single subscript the result remains a
    ## Surv object (whole rows are selected); when the column subscript is
    ## also given, the relevant columns come back as an ordinary matrix.
    if (!missing(j)) {
        # Both subscripts: strip the class and let the default method run
        class(x) <- NULL
        return(NextMethod("["))
    }
    # Row selection only: preserve class and censoring type
    saved.class <- class(x)
    saved.type  <- attr(x, "type")
    class(x) <- NULL
    out <- x[i, , drop = FALSE]
    class(out) <- saved.class
    attr(out, "type") <- saved.type
    out
}

is.na.Surv <- function(x) {
    ## TRUE for each row of the Surv matrix that contains any missing value.
    as.vector(rowSums(is.na(unclass(x))) > 0)
}

# Arithmetic, comparison, and summary operations make no sense on a
# survival object, so the S3 group generics are trapped with an error.
Math.Surv <- function(...)  stop("Invalid operation on a survival time")
Ops.Surv  <- function(...)  stop("Invalid operation on a survival time")
Summary.Surv<-function(...) stop("Invalid operation on a survival time")
# Test whether an object is a Surv object
is.Surv <- function(x) inherits(x, 'Surv')
#SCCS @(#)agexact.fit.s	4.20 06/24/99
#
# Fit a Cox model using the exact partial likelihood, for (start, stop]
# counting-process data.  Called from coxph(); x is the covariate matrix,
# y the Surv matrix (2 or 3 columns), control the iteration/tolerance
# settings.  The underlying C routine does not support case weights.
agexact.fit <- function(x, y, strata, offset, init, control,
			  weights, method, rownames)
    {
    if (!is.matrix(x)) stop("Invalid formula for cox fitting function")
    if (!is.null(weights) && any(weights!=1))
	  stop("Case weights are not supported for the exact method")
    n <- nrow(x)
    nvar <- ncol(x)
    # Decompose the response: 3 columns = (start, stop, event); with only
    # 2 columns every interval starts at 0.
    if (ncol(y)==3) {
	start <- y[,1]
	stopp <- y[,2]
	event <- y[,3]
	}
    else {
	start <- rep(0,n)
	stopp <- y[,1]
	event <- y[,2]
	}

    # Sort the data (or rather, get a list of sorted indices)
    if (length(strata)==0) {
	sorted <- order(stopp, -event)
	newstrat <- as.integer(rep(0,n))
	}
    else {
	sorted <- order(strata, stopp, -event)
	strata <- (as.numeric(strata))[sorted]
	# a 1 marks the last observation of each stratum
	newstrat <- as.integer(c(1*(diff(strata)!=0), 1))
	}
    if (is.null(offset)) offset <- rep(0,n)

    # Sorted copies in the storage modes the C code expects
    sstart <- as.double(start[sorted])
    sstop <- as.double(stopp[sorted])
    sstat <- as.integer(event[sorted])

    if (is.null(nvar)) {
	# A special case: Null model.  Not worth coding up
	stop("Cannot handle a null model + exact calculation (yet)")
	}

    if (!is.null(init)) {
	if (length(init) != nvar) stop("Wrong length for inital values")
	}
    else init <- rep(0,nvar)

    # Newton-Raphson iteration happens in C; the offset is centered to
    # keep exp() of the linear predictor well behaved.
    agfit <- .C("agexact", iter= as.integer(control$iter.max),
		   as.integer(n),
		   as.integer(nvar), sstart, sstop,
		   sstat,
		   x= x[sorted,],
		   as.double(offset[sorted] - mean(offset)),
		   newstrat,
		   means = double(nvar),
		   coef= as.double(init),
		   u = double(nvar),
		   imat= double(nvar*nvar), loglik=double(2),
		   flag=integer(1),
		   double(2*nvar*nvar +nvar*4 + n),
		   integer(2*n),
		   as.double(control$eps),
		   as.double(control$toler.chol),
		   sctest=double(1),PACKAGE="survival" )

    var <- matrix(agfit$imat,nvar,nvar)
    coef <- agfit$coef
    # flag < nvar signals a singular information matrix; the offending
    # coefficients have a zero variance diagonal and are set to NA below
    if (agfit$flag < nvar) which.sing <- diag(var)==0
    else which.sing <- rep(FALSE,nvar)

    # Warn when the loglik converged but a coefficient appears to be
    # heading to +-infinity (large scaled score for that variable)
    infs <- abs(agfit$u %*% var)
    if (control$iter.max >1) {
	if (agfit$flag == 1000)
	       warning("Ran out of iterations and did not converge")
	    else {
		infs <- ((infs > control$eps) & 
			 infs > control$toler.inf*abs(coef))
		if (any(infs))
		warning(paste("Loglik converged before variable ",
			  paste((1:nvar)[infs],collapse=","),
			  "; beta may be infinite. "))
		}
	}

    names(coef) <- dimnames(x)[[2]]
    # Linear predictor, centered at the covariate means
    lp  <- x %*% coef + offset - sum(coef *agfit$means)
    score <- as.double(exp(lp[sorted]))
    # Martingale residuals, computed in C, then unsorted back to the
    # original data order
    agres <- .C("agmart",
		   as.integer(n),
		   as.integer(0),
		   sstart, sstop,
		   sstat,
		   score,
		   rep(1.0, n),
		   newstrat,
		   resid=double(n),PACKAGE="survival")
    resid <- double(n)
    resid[sorted] <- agres$resid
    names(resid) <- rownames
    coef[which.sing] <- NA

    list(coefficients  = coef,
		var    = var,
		loglik = agfit$loglik,
		score  = agfit$sctest,
		iter   = agfit$iter,
		linear.predictors = lp,
		residuals = resid,
		means = agfit$means,
		method= 'coxph')
    }
# SCCS @(#)agreg.fit.s	4.22 06/12/00
#
# Cox model fit for (start, stop] counting-process data using the Breslow
# or Efron approximation (selected via 'method').  Called from coxph().
# A NULL nvar (no covariates) requests the null model: the same C code is
# used, but with a dummy covariate and zero iterations.
agreg.fit <- function(x, y, strata, offset, init, control,
			weights, method, rownames)
    {
    n <- nrow(y)
    nvar <- ncol(x)
    start <- y[,1]
    stopp <- y[,2]
    event <- y[,3]
    if(all(event==0)) stop("Can't fit a Cox model with zero failures")

    # Sort the data (or rather, get a list of sorted indices)
    #  For both stop and start times, the indices go from last to first
    if (length(strata)==0) {
	sort.end  <- order(-stopp, event)
	sort.start<- order(-start)
	newstrat  <- n
	}
    else {
	sort.end  <- order(strata, -stopp, event)
	sort.start<- order(strata, -start)
	# cumulative ending index of each stratum
	newstrat  <- cumsum(table(strata))
	}
    if (missing(offset) || is.null(offset)) offset <- rep(0.0, n)
    if (missing(weights)|| is.null(weights))weights<- rep(1.0, n)
    else if (any(weights<=0)) stop("Invalid weights, must be >0")

    if (is.null(nvar)) {
	# A special case: Null model.  Just return obvious stuff
        #  To keep the C code to a small set, we call the usual routines, but
	#  with a dummy X matrix and 0 iterations
	nvar <- 1
	x <- matrix(1:n, ncol=1)
	maxiter <- 0
	nullmodel <- TRUE
	}
    else {
	nullmodel <- FALSE
	maxiter <- control$iter.max
	}

    if (!is.null(init)) {
	if (length(init) != nvar) stop("Wrong length for inital values")
	}
	else init <- rep(0,nvar)
    # Newton-Raphson iteration in C.  NOTE(review): 'sctest' is passed in
    # holding the Efron-method flag and apparently comes back holding the
    # score test -- confirm against the C source of agfit3.
    agfit <- .C("agfit3", iter= as.integer(maxiter),
		as.integer(n),
		as.integer(nvar), 
		as.double(start), 
		as.double(stopp),
		as.integer(event),
		as.double(x),
		as.double(offset - mean(offset)),
		as.double(weights),
		as.integer(length(newstrat)),
		as.integer(newstrat),
		as.integer(sort.end-1),
		as.integer(sort.start-1),
		means = double(nvar),
		coef= as.double(init),
		u = double(nvar),
		imat= double(nvar*nvar), loglik=double(2),
		flag=integer(1),
		double(2*nvar*nvar +nvar*3 + n),
		as.double(control$eps),
		as.double(control$toler.chol),
		sctest=as.double(method=='efron'),PACKAGE="survival" )

    var <- matrix(agfit$imat,nvar,nvar)
    coef <- agfit$coef
    # flag < nvar: singular information matrix; mark those coefficients
    if (agfit$flag < nvar) which.sing <- diag(var)==0
	else which.sing <- rep(FALSE,nvar)

    # Warn if the loglik converged but a coefficient is diverging
    infs <- abs(agfit$u %*% var)
    if (maxiter >1) {
	if (agfit$flag == 1000)
		warning("Ran out of iterations and did not converge")
	    else {
		infs <- ((infs > control$eps) & 
			 infs > control$toler.inf*abs(coef))
		if (any(infs))
			warning(paste("Loglik converged before variable ",
				      paste((1:nvar)[infs],collapse=","),
				      "; beta may be infinite. "))
		}
	}
    # Centered linear predictor and per-subject risk score
    lp  <- x %*% coef + offset - sum(coef *agfit$means)
    score <- as.double(exp(lp))

    # Martingale residuals, computed in C
    agres <- .C("agmart2",
		as.integer(n),
		as.integer(method=='efron'),
		as.double(start), 
		as.double(stopp),
		as.integer(event),
		as.integer(length(newstrat)), 
		as.integer(newstrat),
		as.integer(sort.end-1), 
		as.integer(sort.start-1),
		score,
		as.double(weights),
		resid=double(n),
		double(2*sum(event)),PACKAGE="survival")
    resid <- agres$resid

    if (nullmodel) {
	resid <- agres$resid
	names(resid) <- rownames

	list(loglik=agfit$loglik[2],
	     linear.predictors = offset,
	     residuals = resid,
	     method= c("coxph.null", 'coxph') )
	}
    else {
	names(coef) <- dimnames(x)[[2]]
	names(resid) <- rownames
	coef[which.sing] <- NA

	list(coefficients  = coef,
	     var    = var,
	     loglik = agfit$loglik,
	     score  = agfit$sctest,
	     iter   = agfit$iter,
	     linear.predictors = as.vector(lp),
	     residuals = resid,
	     means = agfit$means,
	     method= 'coxph')
	}
    }



#setInterface('agreg2', language='C', 
#	     classes=c("


# SCCS @(#)anova.survreg.s	1.1 01/18/99
#
# Analysis-of-deviance table for a survreg fit.  With a single fit the
# terms are removed sequentially (last to first) and the model refit at
# each step; when additional fitted models are supplied via ... the work
# is handed to anova.survreglist.
anova.survreg <- function(object, ..., test = c("Chisq", "none")) {
    test <- match.arg(test)
    # Count the models supplied in the dots
    margs <- function(...)
	    nargs()
    if(margs(...))
	    return(anova.survreglist(list(object, ...), test = test))
    Terms <- object$terms
    term.labels <- attr(Terms, "term.labels")
    nt <- length(term.labels)
    m <- model.frame(object)
    family.obj <- object$family
    y <- model.extract(m, "response")
    if(!inherits(y, "Surv"))
	    stop("Response must be a survival object")
    loglik <- numeric(nt + 1)
    df.res <- loglik
    if(nt) {
	# Drop one term at a time (last to first), refitting and recording
	# -2*loglik and the residual df of each nested model
	loglik[nt + 1] <- -2 * object$loglik[2]
	df.res[nt + 1] <- object$df.residual
	fit <- object
	for(iterm in seq(from = nt, to = 1, by = -1)) {
	    argslist <- list(object = fit,
			     formula = eval(parse(text = paste("~ . -",
						    term.labels[iterm]))))
	    fit <- do.call("update", argslist)
	    loglik[iterm] <- -2 * fit$loglik[2]
	    df.res[iterm] <- fit$df.residual
	    }
	dev <- c(NA,  - diff(loglik))
	df <- c(NA,  diff(df.res))
	}
    else {
	loglik[1] <- -2 * object$loglik[2]
	df.res[1] <- object$df.residual #dim(y)[1] - attr(Terms, "intercept")
	dev <- df <- as.numeric(NA)
	}
    # Bug fix: the original heading referenced undefined objects 'x' and
    # 'digits', and (when nt==0) the never-assigned 'fit'; use the full
    # fitted model 'object' throughout.
    scale.line <-
	if (nrow(object$var) == length(object$coefficients))
	    paste("Scale fixed at", format(object$scale), "\n")
	else "Scale estimated\n"
    heading <- c("Analysis of Deviance Table\n",
		 paste(family.obj[1], "distribution with", family.obj[2],
		       "link\n"),
		 paste("Response: ", as.character(formula(object))[2],
		       "\n", sep = ""),
		 scale.line,
		 "Terms added sequentially (first to last)")
    aod <- data.frame(Df = df, Deviance = dev, "Resid. Df" = df.res,
		      "-2*LL" = loglik, row.names = c("NULL", term.labels),
		      check.names = FALSE)
    attr(aod, "heading") <- heading
    class(aod) <- c("anova", "data.frame")
    if(test == "none")
	    return(aod)
    else stat.anova(aod, test, scale = 1, n = nrow(y))
    }
# SCCS @(#)anova.survreglist.s	1.1 01/18/99
#
# Analysis-of-deviance comparison of several survreg fits; called by
# anova.survreg when more than one model is supplied.  'object' is a list
# of fits; models whose response differs from the first one are dropped.
anova.survreglist <- function(object, ..., test = c("Chisq", "none")) {
    # Describe how the term sets of fits i-1 and i differ, e.g. "+x", "-z",
    # "=" (identical), or "i-1 vs. i" when neither is nested in the other.
    diff.term <- function(term.labels, i)
	    {
		t1 <- term.labels[[1]]
		t2 <- term.labels[[2]]
		m1 <- match(t1, t2, FALSE)
		m2 <- match(t2, t1, FALSE)
		if(all(m1)) {
		    if(all(m2)) return("=")
		    else return(paste(c("", t2[ - m1]), collapse = "+"))
		    }
		else {
		    if(all(m2))
			 return(paste(c("", t1[ - m2]), collapse = "-"))
		    else return(paste(i - 1, i, sep = " vs. "))
		    }
		}
    test <- match.arg(test)
    rt <- length(object)
    if(rt == 1) {
	# Only one model left: fall back to the single-fit anova method
	object <- object[[1]]
	UseMethod("anova")
	}
    forms <- sapply(object, function(x) as.character(formula(x)))
    # Keep only the fits whose response matches that of the first model
    subs <- as.logical(match(forms[2,  ], forms[2, 1], FALSE))
    if(!all(subs))
	    warning("Some fit objects deleted because response differs from the first model")
    if(sum(subs) == 1)
	    stop("The first model has a different response from the rest")
    forms <- forms[, subs]
    object <- object[subs]
    # NOTE(review): survreg fits normally store 'df.residual'; "df.resid"
    # only works here if [[ partial-matches -- confirm against the objects
    # this is actually called with.
    dfres <- sapply(object, "[[", "df.resid")
    m2loglik <- -2 * sapply(object, "[[", "loglik")[2,  ]
    tl <- lapply(object, labels)
    rt <- length(m2loglik)
    effects <- character(rt)
    for(i in 2:rt)
	    effects[i] <- diff.term(tl[c(i - 1, i)], i)
    dm2loglik <-  - diff(m2loglik)
    ddf <-  - diff(dfres)
    heading <- c("Analysis of Deviance Table", 
		 paste("\nResponse: ", forms[2, 1], "\n", sep = ""))
    aod <- data.frame(Terms = forms[3,  ], 
		      "Resid. Df" = dfres, 
		      "-2*LL" = m2loglik, 
		      Test = effects, 
		      Df = c(NA, ddf), 
		      Deviance = c(NA, dm2loglik), check.names = FALSE)
    ##aod <- as.anova(aod, heading)
    aod<-structure(aod,heading=heading,class=c("anova","data.frame"))
    if(test != "none") {
	n <- length(object[[1]]$residuals)
	o <- order(dfres)
        ## R uses scale argument even for "Chisq"
        if (test=="Chisq")
            scale<-1
        else
            # NOTE(review): deviance.lm is not part of modern R's public
            # API -- verify this branch is still reachable and correct.
            scale<-deviance.lm(object[[o[1]]])/dfres[o[1]]
	stat.anova(aod, test, scale, dfres[o[1]], n)
	}
    else aod
    }







##
## redoes attr(modelmatrix,"assign") in the nice S-PLUS 3.4 format:
## a named list mapping each term label to its model-matrix columns
##
attrassign<-function (object, ...) UseMethod("attrassign")

## lm method: rebuild the model matrix and terms, then use the default
## method.
attrassign.lm<-function(object,...){
	attrassign(model.matrix(object),terms(object))}

attrassign.default <- function(object, tt, ...) {
        ## Recreate the S-PLUS 3.4 style "assign" attribute: a named list
        ## mapping each term label (plus "(Intercept)" when present) to the
        ## model-matrix column indices that the term generated.
        if (!inherits(tt, "terms"))
                stop("need terms object")
        assign.attr <- attr(object, "assign")
        if (is.null(assign.attr))
                stop("argument is not really a model matrix")
        term.names <- attr(tt, "term.labels")
        if (attr(tt, "intercept") > 0)
                term.names <- c("(Intercept)", term.names)
        split(order(assign.attr), factor(assign.attr, labels = term.names))
}

## conditional logistic regression
##
## case~exposure+strata(matching)
##
## Implemented by rewriting the user's call into a coxph() fit: every
## subject gets a dummy follow-up time of 1 and the case indicator becomes
## the event status, so the stratified Cox partial likelihood gives the
## conditional logistic likelihood.

clogit<-function(formula,data,method=c("exact","approximate"),
                 na.action=getOption("na.action"),subset=NULL,
                 control=coxph.control()){

    # Build a model.frame call from our own call, dropping the arguments
    # model.frame does not understand
    mf<-match.call()
    mf[[1]]<-as.name("model.frame")
    mf$method<-mf$control<-NULL
    mfn<-mf

    # Count rows *before* na.action/subset are applied: the rep(1,nn)
    # dummy time inserted below must match the full data length
    mfn$na.action<-"I"
    mfn$subset<-NULL
    nrows<-NROW(eval(mfn,parent.frame()))

    mf<-eval(mf,parent.frame())

    # Rewrite the call as coxph(Surv(rep(1,nn), case) ~ ...), keeping the
    # original formula's environment for variable lookup
    coxcall<-match.call()
    coxcall[[1]]<-as.name("coxph")
    newformula<-formula
    newformula[[2]]<-substitute(Surv(rep(1,nn),case),list(case=formula[[2]],nn=nrows))
    environment(newformula)<-environment(formula)
    coxcall$formula<-newformula
    # "exact" -> exact partial likelihood; otherwise the Breslow
    # approximation
    coxcall$method<-switch(match.arg(method),exact="exact","breslow")

    coxcall<-eval(coxcall,sys.frame(sys.parent()))
    coxcall$call<-sys.call()

    class(coxcall)<-c("clogit","coxph")
    coxcall
}
#SCCS @(#)cluster.s	4.1 10/01/94
# Identity function: returns its argument unchanged.  It exists so that
# cluster(id) can appear as a marker term in a model formula; the special
# handling is presumably done by the formula-processing code elsewhere.
cluster <- function(x) x
# SCCS @(#)cox.zph.s	5.2 09/25/98
#  Test proportional hazards
#
# Score test of the proportional-hazards assumption for each covariate of
# a coxph fit (plus an optional global test), based on correlating the
# scaled Schoenfeld residuals with a transformation of event time.
#   transform: 'km' (left-continuous Kaplan-Meier), 'identity', 'rank',
#              'log', or a user-supplied function of time
# Returns a "cox.zph" object: the test table plus the residual matrix
# needed for plotting.
cox.zph <- function(fit, transform='km', global=TRUE) {
    call <- match.call()
    if (!inherits(fit, 'coxph')) stop ("Argument must be the result of coxph")
    if (inherits(fit, 'coxph.null'))
	stop("The are no score residuals for a Null model")

    sresid <- resid(fit, 'schoenfeld')
    varnames <- names(fit$coef)
    nvar <- length(varnames)
    ndead<- length(sresid)/nvar
    # The event times are carried in the residuals' (dim)names
    if (nvar==1) times <- as.numeric(names(sresid))
    else         times <- as.numeric(dimnames(sresid)[[1]])

    # The default KM transform is only used for right-censored data
    if (missing(transform) && attr(fit$y, 'type') != 'right')
	    transform <- 'identity'
    if (is.character(transform)) {
	tname <- transform
	ttimes <- switch(transform,
			   'identity'= times,
			   'rank'    = rank(times),
			   'log'     = log(times),
			   'km' = {
				temp <- survfit.km(factor(rep(1,nrow(fit$y))),
						    fit$y, se.fit=FALSE)
				# A nuisance to do left cont KM
				t1 <- temp$surv[temp$n.event>0]
				t2 <- temp$n.event[temp$n.event>0]
				km <- rep(c(1,t1), c(t2,0))
				if (is.null(attr(sresid, 'strata')))
				    1-km
				else (1- km[sort.list(sort.list(times))])
				},
			   stop("Unrecognized transform"))
	}
    else {
	# A function was supplied; record its name for printing
	tname <- deparse(substitute(transform))
	ttimes <- transform(times)
	}
    xx <- ttimes - mean(ttimes)

    # Per-variable test: chisq = (sum x*r)^2 / (var * ndead * sum x^2)
    r2 <- sresid %*% fit$var * ndead
    test <- xx %*% r2        # time weighted col sums
    corel <- c(cor(xx, r2))
    z <- c(test^2 /(diag(fit$var)*ndead* sum(xx^2)))
    Z.ph <- cbind(corel, z, 1- pchisq(z,1))

    if (global && nvar>1) {
	# Global test over all covariates jointly
	test <- c(xx %*% sresid)
	z    <- c(test %*% fit$var %*% test) * ndead / sum(xx^2)
	Z.ph <- rbind(Z.ph, c(NA, z, 1-pchisq(z, ncol(sresid))))
	dimnames(Z.ph) <- list(c(varnames, "GLOBAL"), c("rho", "chisq", "p"))
	}
    else dimnames(Z.ph) <- list(varnames, c("rho", "chisq", "p"))

    dimnames(r2) <- list(times, names(fit$coef))
    # y = scaled residuals shifted by the coefficients: what the plot
    # method smooths against x
    temp <-list(table=Z.ph, x=ttimes, y=r2 + outer(rep(1,ndead), fit$coef),
    var=fit$var, call=call, transform=tname)
    class(temp) <- "cox.zph"
    temp
    }

"[.cox.zph" <- function(x, ..., drop=FALSE) {
    ## Subset a cox.zph object to the chosen variables.  The test table
    ## rows, residual columns, and variance submatrix are selected while
    ## the object's attributes (names, class) stay intact.
    i <- ..1
    out <- x
    out$table <- x$table[i, , drop = FALSE]
    out$y     <- x$y[, i, drop = FALSE]
    out$var   <- x$var[i, i, drop = FALSE]
    out
    }
#  SCCS   @(#)coxpenal.df.s	1.2 02/21/99
#
# degrees of freedom computation, based on Bob Gray's paper
#
#  hmat = right hand slice of cholesky of H
#  hinv = right hand slice of cholesky of H-inverse
#  fdiag= diagonal of D-inverse
#  assign.list: terms information
#  ptype= 1 or 3 if a sparse term exists, 2 or 3 if a non-sparse exists
#  nvar = # of non-sparse terms
#  pen1 = the penalty matrix (diagonal thereof) for the sparse terms
#  pen2 = the penalty matrix for the non-sparse terms
#  sparse = indicates which term is the sparse one
coxpenal.df <- function(hmat, hinv, fdiag, assign.list, ptype, nvar,
			pen1, pen2, sparse) {
    # Effective degrees of freedom for a penalized Cox fit (Gray's method);
    # the argument glossary is in the comment block above.  Returns the
    # per-term df, the trace of the hat matrix, and variance matrices.

    if (ptype ==1 & nvar==0) {  #only sparse terms
	hdiag <- 1/fdiag
	list(fvar2=(hdiag-pen1)*fdiag^2, df=sum((hdiag-pen1)*fdiag),
	     fvar = fdiag, trH=sum(fdiag))
        }
    
    else if (ptype==2) {  # only dense ones
	# Rebuild H and H-inverse from their cholesky pieces
	hmat.full <- t(hmat) %*% (ifelse(fdiag==0, 0,1/fdiag)* hmat)
	hinv.full <- hinv %*% (fdiag* t(hinv))
	if (length(pen2)==length(hmat.full)) imat <- hmat.full - pen2
	else                                 imat <- hmat.full - diag(pen2)

	var <- hinv.full %*% imat %*% hinv.full

	if (length(assign.list)==1)
		list(var2=var, df=sum(imat * hinv.full), 
		              trH=sum(diag(hinv.full)), var=hinv.full)
	else {
	    # Partition the df and trace among the individual terms
	    df <- trH <- NULL
	    d2 <- diag(hinv.full)
	    for (i in assign.list) {
		temp <- coxph.wtest(hinv.full[i,i], var[i,i])$solve
		if (is.matrix(temp)) df <- c(df, sum(diag(temp)))
		else                 df <- c(df, sum(temp))
		trH<- c(trH, sum(d2[i]))
	        }
	    list(var2=var, df=df, trH=trH, var = hinv.full)
	    }
        }

    else {
	# sparse terms + other vars
	nf <- length(fdiag) - nvar   # number of sparse (frailty) coefs
       	nr1 <- 1:nf
	nr2 <- (nf+1):(nf+nvar)

	d1 <- fdiag[nr1]
	d2 <- fdiag[nr2]
	temp <- t(hinv[nr1,])
	temp2<- t(hinv[nr2,,drop=FALSE])
	A.diag <- d1 + c(rep(1,nvar) %*% (temp^2*d2))
	B  <- hinv[nr1,] %*% (d2 * temp2)
	C  <- hinv[nr2,] %*% (d2 * temp2)  #see notation in paper
	var2 <- C - t(B) %*% (pen1 * B)

	if (ptype==3) {
	    #additional work when we have penalties on both the sparse term
	    #  and on  non-sparse terms
	    hmat.22 <- t(hmat) %*%(ifelse(fdiag==0, 0,1/fdiag)* hmat)
	    temp <- C - coxph.wtest(hmat.22, diag(nvar))$solve
	    if (nvar==1) {
		var2 <- var2 - C*pen2*C    # C will be 1 by 1
		temp2 <- c(temp*pen2)
		}
	    else if (length(pen2) == nvar) {
		var2 <- var2 - C %*% (pen2 * C)  #diagonal penalty
		temp2 <- sum(diag(temp) * pen2)
		}
	    else {
		var2 <- var2 - C %*% matrix(pen2,nvar) %*% C
		temp2 <- sum(diag(temp * pen2))
		}
	    }
	else temp2 <- 0  #temp2 contains trace[B'A^{-1}B P2], this line: P2=0

	# Per-term df and trace; the sparse term is handled specially
	df <- trH <- NULL
	cdiag <- diag(C)

	for (i in 1:length(assign.list)) {
	    if (sparse==i){
		df <- c(df, nf - (sum(A.diag * pen1) + temp2))
		trH <- c(trH, sum(A.diag))
	        }
	    else {
		j <- assign.list[[i]] 
		temp <- coxph.wtest(C[j,j], var2[j,j])$solve
		if (is.matrix(temp)) df <- c(df, sum(diag(temp)))
		else                 df <- c(df, sum(temp))
		trH <- c(trH, sum(cdiag[j]))
	        }
	    }
	list(var=C, df=df, trH=trH, fvar=A.diag, var2=var2)
	}
    }

# SCCS @(#)coxpenal.fit.s	1.8 06/12/00
#
# General penalized likelihood
#
coxpenal.fit <- function(x, y, strata, offset, init, control,
			weights, method, rownames, 
			pcols, pattr, assign) {
    eps <- control$eps
    n <-  nrow(y)
    if (is.matrix(x)) nvar <- ncol(x)
    else  if (length(x)==0) stop("Must have an X variable")
    else nvar <-1

    if (missing(offset) || is.null(offset)) offset <- rep(0,n)
    if (missing(weights)|| is.null(weights))weights<- rep(1,n)
    else {
	if (any(weights<=0)) stop("Invalid weights, must be >0")
	}

    # Get the list of sort indices, but don't sort the data itself
    if (ncol(y) ==3) {
	if (length(strata) ==0) {
	    sorted <- cbind(order(-y[,2], y[,3]), 
			    order(-y[,1]))
	    newstrat <- n
	    }
	else {
	    sorted <- cbind(order(strata, -y[,2], y[,3]),
			    order(strata, -y[,1]))
	    newstrat  <- cumsum(table(strata))
	    }
	status <- y[,3]
	andersen <- TRUE
	routines <- paste('agfit5', c('a', 'b', 'c'), sep='_')
        }
    else {
	if (length(strata) ==0) {
	    sorted <- order(-y[,1], y[,2])
	    newstrat <- n
	    }
	else {
	    sorted <- order(strata, -y[,1], y[,2])
	    strata <- (as.numeric(strata))[sorted]
	    newstrat <-  cumsum(table(strata))
	    }
	status <- y[,2]
	andersen <- FALSE
	routines <- paste('coxfit5', c('a', 'b', 'c'), sep='_')
        }

    n.eff <- sum(y[,ncol(y)])  #effective n for a Cox model is #events
    #
    # are there any sparse frailty terms?
    # 
    npenal <- length(pattr)
    if (npenal == 0 || length(pcols) != npenal)
	    stop("Invalid pcols or pattr arg")
    sparse <- sapply(pattr, function(x) !is.null(x$sparse) &&  x$sparse)
    if (sum(sparse) >1) stop("Only one sparse penalty term allowed")

    #
    # Create a marking vector for the terms, the same length as assign
    #    with pterms == 0=ordinary term, 1=penalized, 2=sparse,
    #    pindex = length of pcols = position in pterms
    # 
    # Make sure that pcols is a strict subset of assign, so that the
    #   df computation (and printing) can unambiguously decide which cols of
    #   X are penalized and which are not when doing "terms" like actions.
    # To make some downstream things easier, order pcols and pattr to be
    #   in the same relative order as the terms in 'assign' 
    #
    ## can't compute assign attribute in R without terms
    ## if (missing(assign)) assign <- attr(x, 'assign')[-1]
    ##Remove 'intercept'
    pterms <- rep(0, length(assign))
    names(pterms) <- names(assign)
    pindex <- rep(0, npenal)
    for (i in 1:npenal) {
	temp <- unlist(lapply(assign, function(x,y) (length(x) == length(y) &&
					     all(x==y)), pcols[[i]]))
	if (sparse[i]) pterms[temp] <- 2
	else pterms[temp] <- 1
	pindex[i] <- (seq(along=temp))[temp]
	}
    if ((sum(pterms==2) != sum(sparse)) || (sum(pterms>0) != npenal))
	    stop("pcols and assign arguments disagree")
    if (any(pindex != sort(pindex))) {
	temp <- order(pindex)
	pindex <- pindex[temp]
	pcols <- pcols[temp]
	pattr <- pattr[temp]
	}
    
    # ptype= 1 or 3 if a sparse term exists, 2 or 3 if a non-sparse exists
    ptype <- any(sparse) + 2*(any(!sparse))

    ## Make sure these get defined <TSL>
    f.expr1<-function(coef) NULL
    f.expr2<-function(coef) NULL


    if (any(sparse)) {
	sparse.attr <- (pattr[sparse])[[1]]  #can't use [[sparse]] directly
	                                     # if 'sparse' is a T/F vector
	fcol <- unlist(pcols[sparse])
	if (length(fcol) > 1) stop("Sparse term must be single column")

	# Remove the sparse term from the X matrix
	xx <- x[, -fcol, drop=FALSE]
	for (i in 1:length(assign)){
	    j <- assign[[i]]
	    if (j[1] > fcol) assign[[i]] <- j-1
	    }
	for (i in 1:npenal) {
	    j <- pcols[[i]]
	    if (j[1] > fcol) pcols[[i]] <- j-1
	    }

	frailx <- x[, fcol]
	frailx <- match(frailx, sort(unique(frailx)))
	nfrail <- max(frailx)
	nvar <- nvar - 1

	#Set up the callback for the sparse frailty term
	pfun1 <- sparse.attr$pfun
        ### In R we use a function and eval() it, not an expression
	f.expr1 <- function(coef){
	    coxlist1$coef <- coef 
	    if (is.null(extra1)) temp <- pfun1(coef, theta1, n.eff)
	    else  temp <- pfun1(coef, theta1, n.eff, extra1)

	    if (!is.null(temp$recenter)) 
		    coxlist1$coef <- coxlist1$coef - as.double(temp$recenter)
	    if (!temp$flag) {
		coxlist1$first <- -as.double(temp$first)
		coxlist1$second <- as.double(temp$second)
	        }
	    coxlist1$penalty <- -as.double(temp$penalty)
	    coxlist1$flag   <- as.logical(temp$flag)
	    if (any(sapply(coxlist1, length) != c(rep(nfrail,3), 1, 1)))
		    stop("Incorrect length in coxlist1")
	    coxlist1
        }
        if (!is.null(getOption("survdebug"))) debug(f.expr1)
        
	coxlist1 <- list(coef=double(nfrail), first=double(nfrail), 
			 second=double(nfrail), penalty=0.0, flag=FALSE)
        ## we pass f.expr1 in as an argument in R
	##.C("init_coxcall1", as.integer(sys.nframe()), expr1)
    }
    else {
	xx <- x
	frailx <- 0
	nfrail <- 0
	}

    # Now the non-sparse penalties
    if (sum(!sparse) >0) {
	full.imat <- !all(unlist(lapply(pattr, function(x) x$diag)))
	ipenal <- (1:length(pattr))[!sparse]   #index for non-sparse terms
	f.expr2 <- function(coef){
            coxlist2$coef<-coef ##<TSL>
	    pentot <- 0
	    for (i in ipenal) {
		pen.col <- pcols[[i]]
		coef <- coxlist2$coef[pen.col]
		if (is.null(extralist[[i]]))
			temp <- ((pattr[[i]])$pfun)(coef, thetalist[[i]],n.eff)
		else    temp <- ((pattr[[i]])$pfun)(coef, thetalist[[i]],
						n.eff,extralist[[i]])
		if (!is.null(temp$recenter))
		    coxlist2$coef[pen.col] <- coxlist2$coef[pen.col]- 
			                               temp$recenter
		if (temp$flag) coxlist2$flag[pen.col] <- TRUE
		else {
		    coxlist2$flag[pen.col] <- FALSE
		    coxlist2$first[pen.col] <- -temp$first
		    if (full.imat) {
			tmat <- matrix(coxlist2$second, nvar, nvar)
			tmat[pen.col,pen.col] <- temp$second
			coxlist2$second <- c(tmat)
		        }
		    else coxlist2$second[pen.col] <- temp$second
		    }
		pentot <- pentot - temp$penalty
	        }
	    coxlist2$penalty <- as.double(pentot)
	    if (any(sapply(coxlist2, length) != length2)) 
		    stop("Length error in coxlist2")
	    coxlist2
        }
        if (!is.null(getOption("survdebug")))
            debug(f.expr2)
	if (full.imat) {
	    coxlist2 <- list(coef=double(nvar), first=double(nvar), 
		    second= double(nvar*nvar), penalty=0.0, flag=rep(FALSE,nvar))
	    length2 <- c(nvar, nvar, nvar*nvar, 1, nvar)
	    }  
	else {
	    coxlist2 <- list(coef=double(nvar), first=double(nvar),
		    second=double(nvar), penalty= 0.0, flag=rep(FALSE,nvar))
	    length2 <- c(nvar, nvar, nvar, 1, nvar)
	    }
        ## in R, f.expr2 is passed as an argument later
	##.C("init_coxcall2", as.integer(sys.nframe()), expr2)
        }
    else full.imat <- FALSE

    #
    # Set up initial values for the coefficients
    #  If there are no sparse terms, finit is set to a vector of length 1
    #  rather than length 0, just to stop some "zero length" errors for
    #  later statements where fcoef is saved (but not used)
    #
    if (nfrail >0) finit <- rep(0,nfrail)
    else finit <- 0
    if (!missing(init) && !is.null(init)) {
	if (length(init) != nvar) {
	    if (length(init) == (nvar+nfrail)) {
		finit <- init[-(1:nvar)]
		init  <- init[1:nvar]
		}
	    else stop("Wrong length for inital values")
	    }
	}
    else init <- double(nvar)

    #
    # "Unpack" the passed in paramter list, 
    #   and make the initial call to each of the external routines
    #
    cfun <- lapply(pattr, function(x) x$cfun)
    parmlist <- lapply(pattr, function(x,eps) c(x$cparm, eps2=eps), sqrt(eps))
    extralist<- lapply(pattr, function(x) x$pparm)
    iterlist <- vector('list', length(cfun))
    thetalist <- vector('list', length(cfun))
    printfun  <- lapply(pattr, function(x) x$printfun)
    for (i in 1:length(cfun)) {
	temp <- (cfun[[i]])(parmlist[[i]], iter=0)
	if (sparse[i]) {
	    theta1 <- temp$theta
	    extra1 <- extralist[[i]]
	    }
	thetalist[[i]] <- temp$theta
	iterlist[[i]] <- temp
	}

    #
    # Manufacture the list of calls to cfun, with appropriate arguments
    #
    ## Amazingly, all this works in R, so I don't need to understand it.
    ##
    temp1 <- c('x', 'coef', 'plik', 'loglik', 'status', 'neff', 'df', 'trH')
    temp2 <- c('frailx', 'coxfit$fcoef', 'loglik1',  'coxfit$loglik', 'status',
	       'n.eff')
    temp3 <- c('xx[,pen.col]', 'coxfit$coef[pen.col]','loglik1',
	       'coxfit$loglik', 'status', 'n.eff')
    calls <- vector('expression', length(cfun))
    cargs <- lapply(pattr, function(x) x$cargs)
    for (i in 1:length(cfun)) {
	tempchar <- paste("(cfun[[", i, "]])(parmlist[[", i, "]], iter,",
			  "iterlist[[", i, "]]")
	temp2b <- c(temp2, paste('pdf[', i, ']'), paste('trH[', i, ']'))
	temp3b <- c(temp3, paste('pdf[', i, ']'), paste('trH[', i, ']'))
	if (length(cargs[[i]])==0) 
	    calls[i] <- parse(text=paste(tempchar, ")"))
	else {
	    temp <- match(cargs[[i]], temp1)
	    if (any(is.na(temp))) stop(paste((cargs[[i]])[is.na(temp)],
					    "not matched"))
	    if (sparse[i]) temp4 <- paste(temp2b[temp], collapse=',')
	    else           temp4 <- paste(temp3b[temp], collapse=',')
	    
	    calls[i] <- parse(text=paste(paste(tempchar,temp4,sep=','),')'))
	    }
        }
    need.df <- any(!is.na(match(c('df', 'trH'), unlist(cargs))))#do any use df?

    #
    # Last of the setup: create the vector of variable names
    #
    varnames <- dimnames(xx)[[2]]
    for (i in 1:length(cfun)) {
	if (!is.null(pattr[[i]]$varname))
		varnames[pcols[[i]]] <- pattr[[i]]$varname
        }

    ## need the current environment for callbacks
    rho<-environment()
    
    #
    # Have C store the data, and get the loglik for beta=initial, frailty=0
    #
    coxfit <- .C(routines[1],
                       as.integer(n),
                       as.integer(nvar), 
                       as.double(y),
                       x= as.double(xx) ,
                       as.double(offset - mean(offset)),
                       as.double(weights),
		       as.integer(newstrat),
		       as.integer(sorted-1),
                       means= double(nvar),
                       coef= as.double(init),
                       u = double(nvar),
		       loglik=double(1),
		       as.integer(method=='efron'),
		       as.integer(ptype),
		       as.integer(full.imat),
		       as.integer(nfrail),
		       as.integer(frailx),
                 #R callback additions
                 f.expr1,f.expr2,rho,
                 PACKAGE="survival"
                 )
    loglik0 <- coxfit$loglik
    means   <- coxfit$means

    #
    #  Now for the actual fit
    #
    iter2 <- 0
    iterfail <- NULL
    thetasave <- unlist(thetalist)
    for (outer in 1:control$outer.max) {
        coxfit <- .C(routines[2], 
		        iter=as.integer(control$iter.max),
			as.integer(n),
			as.integer(nvar),
		        as.integer(newstrat),
			coef = as.double(init),
		        u    = double(nvar+nfrail),
			hmat = double(nvar*(nvar+nfrail)),
			hinv = double(nvar*(nvar+nfrail)),
			loglik = double(1),
			flag = integer(1),
			as.double(control$eps),
		        as.double(control$toler.chol),
			as.integer(method=='efron'),
			as.integer(nfrail),
		        fcoef = as.double(finit),
			fdiag = double(nfrail+nvar),
                     ## R additions
                     f.expr1,f.expr2,rho,
                     PACKAGE="survival"
                     )

	iter <- outer
	iter2 <- iter2 + coxfit$iter
	if (coxfit$iter >=control$iter.max) iterfail <- c(iterfail, iter)

	# If any penalties were infinite, the C code has made fdiag=1 out
	#  of self-preservation (0 divides).  But such coefs are guarranteed
	#  zero so the variance should be too.)
	temp <- rep(FALSE, nvar+nfrail)
	if (nfrail>0) temp[1:nfrail] <- coxlist1$flag
	if (ptype >1) temp[nfrail+ 1:nvar] <- coxlist2$flag
	fdiag <- ifelse(temp, 0, coxfit$fdiag)

	if (need.df) {
            #get the penalty portion of the second derive matrix
	    if (nfrail>0) temp1 <- coxlist1$second
	    else 	  temp1 <- 0
	    if (ptype>1)  temp2 <- coxlist2$second
	    else          temp2 <- 0
					
	    dftemp <-coxpenal.df(matrix(coxfit$hmat, ncol=nvar),  
			         matrix(coxfit$hinv, ncol=nvar), fdiag, 
				 assign, ptype, nvar,
		                 temp1, temp2, pindex[sparse])
	    df <- dftemp$df
	    var  <- dftemp$var
	    var2 <- dftemp$var2
	    pdf <- df[pterms>0]	          # df's for penalized terms
	    trH <- dftemp$trH[pterms>0]   # trace H 
	    }

	if (nfrail >0)  penalty <- -coxlist1$penalty
	else            penalty <- 0
	if (ptype >1) penalty <- penalty - coxlist2$penalty
	loglik1 <- coxfit$loglik + penalty  #C code returns PL - penalty
	if (iter==1) penalty0 <- penalty

	#
	# Call the control function(s)
	#
	done <- TRUE
	for (i in 1:length(cfun)) {
	    pen.col <- pcols[[i]]
	    temp <- eval(calls[i])
	    if (sparse[i]) theta1 <- temp$theta
	    thetalist[[i]] <- temp$theta
	    iterlist[[i]] <- temp
	    done <- done & temp$done
    	    }
	if (done) break

	# 
	# Choose starting estimates for the next iteration
	#
	if (iter==1) {
	    init <- coefsave <- coxfit$coef
	    finit <- fsave   <- coxfit$fcoef
	    thetasave <- cbind(thetasave, unlist(thetalist))
	    }
	else {
	    # the "as.vector" removes names, dodging a bug in Splus5.1
	    temp <- as.vector(unlist(thetalist))
	    coefsave <- cbind(coefsave, coxfit$coef)
	    fsave    <- cbind(fsave, coxfit$fcoef)
	    # temp = next guess for theta
	    # *save = prior thetas and the resultant fits
	    # choose as initial values the result for the closest old theta
	    howclose <- apply((thetasave-temp)^2,2, sum)
	    which <- min((1:iter)[howclose==min(howclose)])
	    if (nvar>0)   init <- coefsave[,which]
	    if (nfrail>0) finit<- fsave[,which]
	    thetasave <- cbind(thetasave, temp)
	    }
        }

    # release the memory
    expect <- .C(routines[3], as.integer(n),
		             as.integer(nvar),
		             as.integer(newstrat),
		             as.integer(method=='efron'),
		             expect= double(n),PACKAGE="survival")$expect

    if (!need.df) {  #didn't need it iteration by iteration, but do it now
        #get the penalty portion of the second derive matrix
	if (nfrail>0) temp1 <- coxlist1$second
	else 	      temp1 <- 0
	if (ptype>1)  temp2 <- coxlist2$second
	else          temp2 <- 0
					
	dftemp <-coxpenal.df(matrix(coxfit$hmat,ncol=nvar),  
			     matrix(coxfit$hinv,ncol=nvar),  fdiag, 
		             assign, ptype, nvar, 
		             temp1, temp2, pindex[sparse])
	df <- dftemp$df
	trH <- dftemp$trH
	var <- dftemp$var
	var2  <- dftemp$var2
        }

    if (control$iter.max >1 && length(iterfail)>0)
	    warning(paste("Inner loop failed to coverge for iterations", 
			  paste(iterfail, collapse=' ')))
    which.sing <- (fdiag[nfrail + 1:nvar] ==0)
    
    coef <- coxfit$coef
    names(coef) <- varnames
    coef[which.sing] <- NA
    resid <- double(n)
    resid <- status - expect
    names(resid) <- rownames

    names(iterlist) <- names(pterms[pterms>0])

    if (nfrail >0) {
	lp <- offset + coxfit$fcoef[x[,fcol]]
	if (nvar >0) {   #sparse frailties and covariates
	    lp <- lp + x[,-fcol,drop=FALSE] %*%coef - sum(means*coef)
	    list(coefficients  = coef,
		 var    = var,
		 var2   = var2,
		 loglik = c(loglik0, loglik1),
		 iter   = c(iter, iter2),
		 linear.predictors = as.vector(lp),
		 residuals = resid,
		 means = means,
		 method= c('coxph.penal', 'coxph'),
		 frail = coxfit$fcoef,
		 fvar  = dftemp$fvar,
		 df = df, df2=dftemp$df2, 
		 penalty= c(penalty0, penalty),
		 pterms = pterms, assign2=assign,
		 history= iterlist,
		 coxlist1=coxlist1, 
		 printfun=printfun)
	    }
	else {  #sparse frailties only
	    list( loglik = c(loglik0, loglik1),
		 iter   = c(iter, iter2),
		 linear.predictors = as.vector(lp),
		 residuals = resid,
		 means = means,
		 method= c('coxph.penal', 'coxph'),
		 frail = coxfit$fcoef,
		 fvar  = dftemp$fvar,
		 df = df, df2=dftemp$df2, 
		 penalty = c(penalty0, penalty), 
		 pterms = pterms, assign2=assign,
		 history= iterlist,
		 printfun=printfun)
	    }
         }
    else {  #no sparse terms
	list(coefficients  = coef,
	     var    = var,
	     var2   = var2,
	     loglik = c(loglik0, loglik1),
	     iter   = c(iter, iter2),
	     linear.predictors = as.vector(x%*%coef) - sum(means*coef),
	     residuals = resid,
	     means = means,
	     method= c('coxph.penal', 'coxph'),
	     df = df, df2=dftemp$df2,
	     penalty= c(penalty0, penalty), 
	     pterms = pterms, assign2=assign,
	     history= iterlist,
	     coxlist2=coxlist2,
	     printfun= printfun)
	}
    }
# SCCS @(#)coxph.control.s	5.6 06/12/00
#
# Gather all of the control parameters for coxph into one spot
#
# Collect and validate the iteration/convergence settings for coxph.
#
# eps:        convergence criterion for the partial likelihood
# toler.chol: tolerance for singularity detection in the Cholesky
# iter.max:   maximum Newton-Raphson iterations (inner loop)
# toler.inf:  threshold for the "beta may be infinite" warning
# outer.max:  maximum outer iterations for penalized/frailty fits
#
# Returns a named list of the settings.  eps.miss and iter.miss record
# whether the caller supplied eps/iter.max explicitly; coxph() uses
# them to substitute looser defaults for penalized models.
coxph.control <- function(eps=1e-9, 
                          toler.chol = .Machine$double.eps ^ .75, 
			  iter.max=20,
			  toler.inf= sqrt(eps), outer.max=10 ) {
    if (iter.max < 0)   stop("Invalid value for iterations")
    if (eps <= 0)       stop("Invalid convergence criteria")
    # A Cholesky tolerance at or above eps defeats the convergence test
    if (eps <= toler.chol)
        warning("For numerical accuracy, tolerance should be < eps")
    if (toler.inf <= 0) stop("The inf.warn setting must be >0")

    list(eps = eps, toler.chol = toler.chol, iter.max = iter.max,
         toler.inf = toler.inf, outer.max = outer.max,
         eps.miss = missing(eps), iter.miss = missing(iter.max))
    }
#SCCS  @(#)coxph.detail.s	4.11 07/20/98
# Detailed per-event output for a fitted Cox model: at each death time,
# the risk-set means, score vector, and information matrix, computed by
# the C routine "coxdetail".  Only the Breslow and Efron ties methods
# are supported.
#
# object: a coxph fit
# Returns a list with per-death-time components (time, means, nevent,
#   nrisk, hazard, score, imat, varhaz) plus the sorted y and x used,
#   and strata/weights when present.
coxph.detail <-  function(object) {
    method <- object$method
    if (method!='breslow' && method!='efron')
	stop(paste("Detailed output is not available for the", method,
			"method"))
    n <- length(object$residuals)
    rr <- object$residual
    weights <- object$weights        #always present if there are weights
    x <- object$x
    y <- object$y
    strat <- object$strata
    Terms <- object$terms
    if (!inherits(Terms, 'terms'))
	    stop("invalid terms component of object")
    strats <- attr(Terms, "specials")$strata

    # Rebuild x/y (and strata) from the model frame if they were not
    # stored in the fit
    if (is.null(y)  ||  is.null(x)) {
	temp <- coxph.getdata(object, y=TRUE, x=TRUE, strata=TRUE)
	y <- temp$y
	x <- temp$x
	if (length(strats)) strat <- temp$strata
	}

    nvar <- ncol(x)
    # Pad (time, status) data out to (start, stop, status) form, using a
    # start time strictly earlier than every observed time
    if (ncol(y)==2) {
	mintime <- min(y[,1])
	if (mintime < 0) y <- cbind( 2*mintime -1, y)
	else 	y <- cbind(-1,y)
	}
    # Sort by (strata,) stop time with deaths before censorings at ties;
    # newstrat flags the last observation of each stratum
    if (is.null(strat)) {
	ord <- order(y[,2], -y[,3])
	newstrat <- rep(0,n)
	}
    else {
	ord <- order(strat, y[,2], -y[,3])
	newstrat <- c(diff(as.numeric(strat[ord]))!=0 ,1)
	}
    newstrat[n] <- 1

    # sort the data
    x <- x[ord,]
    y <- y[ord,]
    storage.mode(y) <- 'double'
    score <- exp(object$linear.predictor)[ord]
    if (is.null(weights)) weights <- rep(1,n)
    else                  weights <- weights[ord]

    ndeath <- sum(y[,3])
    # The first element of 'means' doubles as the efron/breslow flag on
    # input to the C routine
    ff <- .C("coxdetail", as.integer(n),
			  as.integer(nvar),
			  ndeath= as.integer(ndeath),
			  y = y,
			  as.double(x),
			  as.integer(newstrat),
			  index =as.double(score),
			  weights = as.double(weights),
			  means= c(method=='efron', double(ndeath*nvar)),
			  u = double(ndeath*nvar),
			  i = double(ndeath*nvar*nvar),
			  double(nvar*(3 + 2*nvar)),
             PACKAGE="survival")
    # ndeath as updated by the C code -- presumably the number of unique
    # death times actually found (TODO confirm against coxdetail.c)
    keep <- 1:ff$ndeath
    vname<- dimnames(x)[[2]]
    time <- y[ff$index[keep],2]
    names(time) <- NULL
    means<- (matrix(ff$means,ndeath, nvar))[keep,]
    score<-  matrix(ff$u, ndeath, nvar)[keep,]
    var <- array(ff$i, c(nvar, nvar, ndeath))[,,keep]
    # Label results by death time; single-covariate fits give vectors
    if (nvar>1) {
	dimnames(means) <- list(time, vname)
	dimnames(score) <- list(time, vname)
	dimnames(var) <- list(vname, vname, time)
	}
    else {
	names(means) <- time
	names(score) <- time
	names(var) <- time
	}

    dimnames(ff$y) <- NULL
    temp <- list(time = time, means=means, nevent=ff$y[keep,1],
	 nrisk = ff$y[keep,2], hazard= ff$y[keep,3], score= score,  imat=var,
	 varhaz=ff$weights[keep], y=y, x=x)
    if (length(strats)) temp$strata <- table((strat[ord])[ff$index[keep]])
    if (!all(weights==1)) temp$weights <- weights
    temp
    }
# SCCS  @(#)coxph.fit.s	5.8 06/24/99
# Fit a Cox model for simple right-censored data (no (start, stop]
# intervals).  Called by coxph(); agreg.fit/agexact.fit handle the
# counting-process and exact-ties cases.
#
# x:        covariate matrix (zero-length object => null model)
# y:        two-column Surv matrix: (time, status)
# strata:   strata indicator (zero length if none)
# offset:   linear-predictor offsets (may be missing/NULL)
# init:     initial coefficients (may be missing/NULL)
# control:  result of coxph.control()
# weights:  case weights, must be > 0 (may be missing/NULL)
# method:   "breslow" or "efron" tie handling
# rownames: observation names used to label the residual vector
coxph.fit <- function(x, y, strata, offset, init, control,
			weights, method, rownames)
    {
    n <-  nrow(y)
    if (is.matrix(x))
        nvar <- ncol(x)
    else
        if (length(x)==0)
            nvar <-0
        else
            nvar <-1
    time <- y[,1]
    status <- y[,2]

    # Sort the data (or rather, get a list of sorted indices)
    if (length(strata)==0) {
	sorted <- order(time)
	newstrat <- as.integer(rep(0,n))
	}
    else {
	# newstrat marks the last observation of each stratum
	sorted <- order(strata, time)
	strata <- (as.numeric(strata))[sorted]
	newstrat <- as.integer(c(1*(diff(strata)!=0), 1))
	}
    if (missing(offset) || is.null(offset)) offset <- rep(0,n)
    if (missing(weights)|| is.null(weights))weights<- rep(1,n)
    else {
	if (any(weights<=0)) stop("Invalid weights, must be >0")
	weights <- weights[sorted]
	}
    stime <- as.double(time[sorted])
    sstat <- as.integer(status[sorted])

    if (nvar==0) {
	# A special case: Null model.
	#  (This is why I need the rownames arg- can't use x' names)
	# Set things up for 0 iterations on a dummy variable
	x <- as.matrix(rep(1.0, n))
	nullmodel <- TRUE
	nvar <- 1
	init <- 0
	maxiter <- 0
	}
    else {
	nullmodel <- FALSE
	maxiter <- control$iter.max
	if (!missing(init) && !is.null(init)) {
	    if (length(init) != nvar) stop("Wrong length for inital values")
	    }
	else init <- rep(0,nvar)
	}
    # Newton-Raphson iteration happens in C.  'sctest' carries the
    # efron/breslow flag in and (per its use below) the score test out.
    coxfit <- .C("coxfit2", iter=as.integer(maxiter),
		   as.integer(n),
		   as.integer(nvar), stime,
		   sstat,
		   x= x[sorted,] ,
		   as.double(offset[sorted] - mean(offset)),
		   as.double(weights),
		   newstrat,
		   means= double(nvar),
		   coef= as.double(init),
		   u = double(nvar),
		   imat= double(nvar*nvar), loglik=double(2),
		   flag=integer(1),
		   double(2*n + 2*nvar*nvar + 3*nvar),
		   as.double(control$eps),
		   as.double(control$toler.chol),
		   sctest=as.double(method=="efron")
                 ,PACKAGE="survival")

    if (nullmodel) {
	# Martingale residuals for the null model; resid is re-expanded
	# to the original (unsorted) observation order
	score <- exp(offset[sorted])
	coxres <- .C("coxmart", as.integer(n),
				as.integer(method=='efron'),
				stime,
				sstat,
				newstrat,
				as.double(score),
				as.double(weights),
				resid=double(n),
                     PACKAGE="survival")
	resid <- double(n)
	resid[sorted] <- coxres$resid
	names(resid) <- rownames

	list( loglik = coxfit$loglik[1],
	      linear.predictors = offset,
	      residuals = resid,
	      method= c('coxph.null', 'coxph') )
	}
    else {
	var <- matrix(coxfit$imat,nvar,nvar)
	coef <- coxfit$coef
	# flag < nvar signals a singular information matrix; the C code
	# zeroes the corresponding diagonal entries
	if (coxfit$flag < nvar) which.sing <- diag(var)==0
	else which.sing <- rep(FALSE,nvar)

	# Warn when the loglik converged but a coefficient is drifting
	# to +/- infinity (score * var still large relative to coef)
	infs <- abs(coxfit$u %*% var)
	if (maxiter >1) {
	    if (coxfit$flag == 1000)
		   warning("Ran out of iterations and did not converge")
	    else {
		infs <- ((infs > control$eps) & 
			 infs > control$toler.inf*abs(coef))
		if (any(infs))
		warning(paste("Loglik converged before variable ",
			  paste((1:nvar)[infs],collapse=","),
			  "; beta may be infinite. "))
		}
	    }

	names(coef) <- dimnames(x)[[2]]
	# Linear predictor is centered at the covariate means
	lp <- c(x %*% coef) + offset - sum(coef*coxfit$means)
	score <- exp(lp[sorted])
	# Martingale residuals, re-expanded to original observation order
	coxres <- .C("coxmart", as.integer(n),
				as.integer(method=='efron'),
				stime,
				sstat,
				newstrat,
				as.double(score),
				as.double(weights),
				resid=double(n),
                     PACKAGE="survival")
	resid <- double(n)
	resid[sorted] <- coxres$resid
	names(resid) <- rownames
	coef[which.sing] <- NA

	list(coefficients  = coef,
		    var    = var,
		    loglik = coxfit$loglik,
		    score  = coxfit$sctest,
		    iter   = coxfit$iter,
		    linear.predictors = as.vector(lp),
		    residuals = resid,
		    means = coxfit$means,
		    method='coxph')
	}
    }
# SCCS @(#)coxph.getdata.s	4.3 09/08/94
#
# Reconstruct the Cox model data.  This is done in so many routines
#  that I extracted it out.
#
# The "stratax" name is to avoid conflicts with the strata() function, but
#   still allow users to type "strata" as an arg.
#
# Reconstruct the data used by a Cox model fit.  So many routines need
# this that it is extracted into one place.
#
# fit: a coxph fit
# y, x, stratax, offset: which components to return.  ("stratax" avoids
#   a clash with the strata() function but still lets users type
#   strata= via partial argument matching.)
#
# Components saved in the fit are used directly; anything missing is
# rebuilt from the model frame.  Returns a list containing only the
# requested pieces, named y, x, strata, offset (in that order).
coxph.getdata <- function(fit, y=TRUE, x=TRUE, stratax=TRUE, offset=FALSE) {
    ty <- fit$y
    tx <- fit$x
    strat <- fit$strata
    Terms <- fit$terms
    if (is.null(attr(Terms, 'offset'))) offset <- FALSE
    if (offset) x<- TRUE        # the offset is reconstructed using x
    if (!inherits(Terms, 'terms'))
	    stop("invalid terms component of fit")
    strats <- attr(Terms, "specials")$strata
    if (length(strats)==0) stratax <- FALSE

    if ( (y && is.null(ty)) || (x && is.null(tx)) ||
	     (stratax && is.null(strat)) || offset) {
	# get the model frame
	m <- fit$model
	if (is.null(m)) m <- model.frame(fit)

	# Pull things out
	if (y && is.null(ty)) ty <- model.extract(m, 'response')

	if (offset) toff <- model.extract(m, 'offset')

	# strata was saved in the fit if and only if x was
	if (x && is.null(tx)) {
	    dropx <- untangle.specials(Terms, 'cluster')$terms
	    if (stratax) {
		temp <- untangle.specials(Terms, 'strata', 1)
		tx <- model.matrix(Terms[-c(dropx,temp$terms)], m)[,-1,drop=FALSE]
		strat <- strata(m[temp$vars], shortlabel=TRUE)
		}
	    else {
		if (length(dropx)) tx <- model.matrix(Terms[-dropx], m)[,-1,drop=FALSE]
		else               tx <- model.matrix(Terms, m)[,-1,drop=FALSE]
		}
	    }
	}
    else if (offset)
       # Back the offset out of the stored linear predictor
       toff <- fit$linear.predictors -(c(tx %*% fit$coef) - sum(fit$means*fit$coef))

    # Assemble only the requested components.  (This replaces an
    # eval(parse(text=...)) construction; wrapping each piece in list()
    # before c() preserves NULL components just as the old code did.)
    out <- list()
    if (y)       out <- c(out, list(y = ty))
    if (x)       out <- c(out, list(x = tx))
    if (stratax) out <- c(out, list(strata = strat))
    if (offset)  out <- c(out, list(offset = toff))
    out
    }
#SCCS  @(#)coxph.s	5.12 06/12/00
# Version with general penalized likelihoods

# Fit a Cox proportional hazards model, with support for penalized
# terms (coxph.penalty attributes, e.g. frailty/pspline) and for a
# robust (sandwich) variance via cluster().
#
# formula: model formula with a Surv object on the left
# data, weights, subset, na.action: usual model-frame arguments
# init:    initial coefficient values
# control: result of coxph.control(); built from ... when missing
# method:  tie handling -- "efron", "breslow", or "exact"
# singular.ok: warn rather than stop when X is deemed singular
# robust:  compute the sandwich variance (forced TRUE by cluster())
# model, x, y: keep the model frame / design matrix / response in the fit
coxph <- function(formula=formula(data), data=parent.frame(),
	weights, subset, na.action, init,
	control, method= c("efron", "breslow", "exact"),
	singular.ok =TRUE, robust=FALSE,
	model=FALSE, x=FALSE, y=TRUE, ...) {

    method <- match.arg(method)
    call <- match.call()
    # Build the model.frame call, keeping only the arguments it needs
    m <- match.call(expand=FALSE)
    temp <- c("", "formula", "data", "weights", "subset", "na.action")
    m <- m[ match(temp, names(m), nomatch=0)]
    special <- c("strata", "cluster")
    Terms <- if(missing(data)) terms(formula, special)
	     else              terms(formula, special, data=data)
    m$formula <- Terms
    m[[1]] <- as.name("model.frame")
    m <- eval(m, parent.frame())
    if (NROW(m)==0)
        stop("No (non-missing) observations")
    
    if (missing(control)) control <- coxph.control(...)
    Y <- model.extract(m, "response")
    if (!inherits(Y, "Surv")) stop("Response must be a survival object")
    weights <- model.extract(m, 'weights')
    # Combine any offset terms into a single numeric vector
    offset<- attr(Terms, "offset")
    tt <- length(offset)
    offset <- if(tt == 0)
		    rep(0, nrow(Y))
	      else if(tt == 1)
		      m[[offset]]
	      else {
		    ff <- m[[offset[1]]]
		    for(i in 2:tt)
			    ff <- ff + m[[offset[i]]]
		    ff
		    }

    attr(Terms,"intercept")<- 1  #Cox model always has \Lambda_0
    # Locate cluster() and strata() terms; both are dropped from the
    # design matrix (strata define baseline hazards, cluster defines
    # the grouping for the robust variance)
    strats <- attr(Terms, "specials")$strata
    cluster<- attr(Terms, "specials")$cluster
    dropx <- NULL
    if (length(cluster)) {
	if (missing(robust)) robust <- TRUE
	tempc <- untangle.specials(Terms, 'cluster', 1:10)
	ord <- attr(Terms, 'order')[tempc$terms]
	if (any(ord>1)) stop ("Cluster can not be used in an interaction")
	cluster <- strata(m[,tempc$vars], shortlabel=TRUE)  #allow multiples
	dropx <- tempc$terms
	}
    if (length(strats)) {
	temp <- untangle.specials(Terms, 'strata', 1)
	dropx <- c(dropx, temp$terms)
	if (length(temp$vars)==1) strata.keep <- m[[temp$vars]]
	else strata.keep <- strata(m[,temp$vars], shortlabel=TRUE)
	strats <- as.numeric(strata.keep)
	}

    ##if (length(dropx)) X <- model.matrix(Terms[-dropx], m)[,-1,drop=F]
    ##else               X <- model.matrix(Terms, m)[,-1,drop=F]
    ### this is inefficient, but subscripting loses the assign attribute
    if (length(dropx))
        newTerms<-Terms[-dropx]
    else
        newTerms<-Terms
    X<-model.matrix(newTerms,m)
    # Per-term column indices, shifted down by 1 after the intercept
    # column is removed below
    assign<-lapply(attrassign(X,newTerms)[-1],function(x) x-1)
    X<-X[,-1,drop=FALSE]
    
    
    type <- attr(Y, "type")
    if (type!='right' && type!='counting')
	stop(paste("Cox model doesn't support \"", type,
			  "\" survival data", sep=''))
    if (missing(init)) init <- NULL

    # Check for penalized terms
    pterms <- sapply(m, inherits, 'coxph.penalty')
    if (any(pterms)) {
	pattr <- lapply(m[pterms], attributes)
	# 
	# the 'order' attribute has the same components as 'term.labels'
	#   pterms always has 1 more (response), sometimes 2 (offset)
	# drop the extra parts from pterms
	temp <- c(attr(Terms, 'response'), attr(Terms, 'offset'))
	if (length(dropx)) temp <- c(temp, dropx+1)
	pterms <- pterms[-temp]
	temp <- match((names(pterms))[pterms], attr(Terms, 'term.labels'))
	ord <- attr(Terms, 'order')[temp]
	if (any(ord>1)) stop ('Penalty terms cannot be in an interaction')
	##pcols <- (attr(X, 'assign')[-1])[pterms]  
        pcols<-assign[pterms]
        
	#penalized are hard sometimes	
	if (control$eps.miss)   control$eps <- 1e-7
	if (control$iter.miss)  control$iter.max <- 20  

        fit <- coxpenal.fit(X, Y, strats, offset, init=init,
				control,
				weights=weights, method=method,
				row.names(m), pcols, pattr, assign)
	}
    else {
	# No penalties: pick the fitter by data type and ties method
	if( method=="breslow" || method =="efron") {
	    if (type== 'right')  fitter <- get("coxph.fit")
	    else                 fitter <- get("agreg.fit")
	    }
	else if (method=='exact') fitter <- get("agexact.fit")
	else stop(paste ("Unknown method", method))

	fit <- fitter(X, Y, strats, offset, init, control, weights=weights,
			    method=method, row.names(m))
	}

    if (is.character(fit)) {
	# The fitter returned an error message rather than a fit
	fit <- list(fail=fit)
	class(fit) <- 'coxph'
	}
    else {
	if (!is.null(fit$coef) && any(is.na(fit$coef))) {
	   vars <- (1:length(fit$coef))[is.na(fit$coef)]
	   msg <-paste("X matrix deemed to be singular; variable",
			   paste(vars, collapse=" "))
	   if (singular.ok) warning(msg)
	   else             stop(msg)
	   }
	fit$n <- nrow(Y)
	class(fit) <- fit$method
	fit$terms <- Terms
	##fit$assign <- attr(X, 'assign')
        fit$assign<-assign
	if (robust) {
	    # Sandwich variance from dfbeta residuals; the model-based
	    # variance is kept as naive.var
	    fit$naive.var <- fit$var
	    fit$method    <- method
	    # a little sneaky here: by calling resid before adding the
	    #   na.action method, I avoid having missings re-inserted
	    # I also make sure that it doesn't have to reconstruct X and Y
	    fit2 <- c(fit, list(x=X, y=Y, weights=weights))
	    if (length(strats)) fit2$strata <- strata.keep
	    if (length(cluster)) {
		temp <- residuals.coxph(fit2, type='dfbeta', collapse=cluster,
					  weighted=TRUE)
		# get score for null model
		if (is.null(init))
			fit2$linear.predictors <- 0*fit$linear.predictors
		else fit2$linear.predictors <- c(X %*% init)
		temp0 <- residuals.coxph(fit2, type='score', collapse=cluster,
					 weighted=TRUE)
		}
	    else {
		temp <- residuals.coxph(fit2, type='dfbeta', weighted=TRUE)
		fit2$linear.predictors <- 0*fit$linear.predictors
		temp0 <- residuals.coxph(fit2, type='score', weighted=TRUE)
	        }
	    fit$var <- t(temp) %*% temp
	    u <- apply(as.matrix(temp0), 2, sum)
	    # Robust score test
	    fit$rscore <- coxph.wtest(t(temp0)%*%temp0, u, control$toler.chol)$test
	    }
	#Wald test
	if (length(fit$coef) && is.null(fit$wald.test)) {  
	    #not for intercept only models, or if test is already done
	    nabeta <- !is.na(fit$coef)
	    if (is.null(init)) temp <- fit$coef[nabeta]
	    else temp <- (fit$coef - init)[nabeta]
	    fit$wald.test <-  coxph.wtest(fit$var[nabeta,nabeta], temp,
					  control$toler.chol)$test
	    }
	na.action <- attr(m, "na.action")
	if (length(na.action)) fit$na.action <- na.action
	if (model) fit$model <- m
        ## else { ## we might want model=T and X=T
        if (x)  {
            fit$x <- X
            if (length(strats)) fit$strata <- strata.keep
        }
        if (y)     fit$y <- Y
        ##    }
    }
    if (!is.null(weights) && any(weights!=1)) fit$weights <- weights
    
    ##fit$formula <- as.vector(attr(Terms, "formula"))
    fit$formula<-formula(Terms) ## get the environments right
    fit$call <- call
    fit$method <- method
    fit
    }
# SCCS @(#)coxph.wtest.s	1.2 10/28/98
#
# A Wald test routine, used by the Cox model
#  Why not just do  sum(b * solve(var, b))? -- because the solve
#  function chokes on singular matrices.
#
# Wald test  b' V^{-1} b, computed by the package's C code so that
# singular variance matrices are handled gracefully (solve() would
# choke on them -- see the file comment above).
#
# var:        variance matrix, or a scalar when b has length 1
# b:          coefficient vector, or a matrix whose columns are tests
# toler.chol: singularity tolerance for the Cholesky decomposition
#
# Returns list(test, df, solve) where 'solve' holds V^{-1} b.
coxph.wtest <- function(var, b, toler.chol=1e-9) {
    # How many coefficients, and how many simultaneous tests?
    if (is.matrix(b)) {
        nvar  <- nrow(b)
        ntest <- ncol(b)
        }
    else {
        nvar  <- length(b)
        ntest <- 1
        }

    # Scalar variance: no C routine needed
    if (length(var) == 1) {
        if (nvar == 1) return(list(test = b * b / var, df = 1, solve = b / var))
        else stop("Argument lengths do not match")
        }

    if (!is.matrix(var) || (nrow(var) != ncol(var)))
            stop("First argument must be a square matrix")
    if (nrow(var) != nvar) stop("Argument lengths do not match")

    fit <- .C('coxph_wtest', df=as.integer(nvar),
                             as.integer(ntest),
                             as.double(var),
                             tests= as.double(b),
                             solve= double(nvar*ntest),
                             as.double(toler.chol),
              PACKAGE="survival")
    if (ntest == 1)
        list(test = fit$tests[1], df = fit$df, solve = fit$solve)
    else
        list(test = fit$tests[1:ntest], df = fit$df,
             solve = matrix(fit$solve, nvar, ntest))
    }


# difftime columns behave like plain vectors in a data frame, so the
# generic vector method is reused directly
as.data.frame.difftime <-as.data.frame.vector


# Coerce x to a "date" object (integer day count; 1 Jan 1960 = day 0).
#
# x:     a date (returned unchanged), POSIXt time, character string
#        (parsed via the C routine "char_date" using the given field
#        order), or a numeric day count (floored)
# order: field order for character input, e.g. "mdy" = month/day/year
# ...:   passed on to mdy.date() for character input
#
# Bug fix: the original "date" and "POSIXt" branches computed a value
# but never assigned `temp`, so the trailing `temp` raised
# "object 'temp' not found" for those inputs; both branches now store
# their result in temp.
as.date <- function(x, order = "mdy", ...) {
    if (inherits(x, "date")) temp <- x          # already a date: unchanged
    else if (inherits(x, "POSIXt")) {
	rval <- difftime(x, ISOdate(1960,1,1), units="days")
	temp <- structure(rval, class="date")
	}
    else if (is.character(x)) {
	# Translate the order string into field positions for the C code
	order.vec <-
            switch(order,
                   "ymd" = c(1, 2, 3),
                   "ydm" = c(1, 3, 2),
                   "mdy" = c(2, 3, 1),
                   "myd" = c(2, 1, 3),
                   "dym" = c(3, 1, 2),
                   "dmy" = c(3, 2, 1),
                   stop("Invalid value for `order' option"))
	nn <- length(x)
	temp <- .C("char_date",
                   as.integer(nn),
                   as.integer(order.vec),
                   as.character(x),
                   month =integer(nn),
                   day = integer(nn),
                   year = integer(nn),
                   PACKAGE = "survival")
	# Fields the C routine could not parse come back as 0 (or out of
	# range for month) and become NA
	month <- ifelse(temp$month < 1 | temp$month > 12, NA, temp$month)
	day   <- ifelse(temp$day == 0, NA, temp$day)
	year  <- ifelse(temp$year == 0, NA, temp$year)
	temp <- mdy.date(month, day, year, ...)
    }
    else if (is.numeric(x)) {
	temp <- floor(x)
	attr(temp, "class") <- "date"
	}
    else stop("Cannot coerce to date format")
    temp
}
# Test whether x is a "date" object
is.date <- function(x) inherits(x, "date")

# Arithmetic/comparison group generic for dates.  date +/- number gives
# a date; date - date gives a plain number of days; logical &/| are
# rejected; everything else falls through to the numeric operation.
# The .Method entries are "" for an operand that is not a date.
Ops.date <- function(e1, e2) {
    ## Certain operation yield a date, others just give a number.  In
    ## order to make plotting functions work well, we end up allowing
    ## most all numeric operations.
    if (missing(e2))
        stop("Unary operations not meaningful for dates")
    if (.Generic == "&" || .Generic== "|")
	stop(paste("\`", .Generic, "' not meaningful for dates",
                   sep = ""))
    # Drop the class so the operations below use plain numeric arithmetic
    class(e1) <- NULL
    class(e2) <- NULL
    if (.Generic == "-") {
	if (.Method[2] == "" ) {
            ## subtract a constant from a date 
            e1 <- as.integer(e1 - e2)
            class(e1) <- "date"
            e1
        }
	else if ((.Method[1] == "Ops.date" && .Method[2] == "Ops.date") ||
		 (.Method[1] == ""))
            # date - date (or constant - date): result is a plain number
            e1 - e2
	else
            ## date - factor should fail
            stop("Invalid operation for dates")
	}
    else if (.Generic == "+") {
	if (.Method[1] == "" || .Method[2]=="")  {
            ## add constant to a date
            e1 <- as.integer(e1 + e2);
            class(e1) <- "date"
            e1
        }
	else e1 + e2
	}
    else get(.Generic)(e1, e2)
}
# Math group generic (log, sqrt, round, ...): never meaningful for dates
Math.date <- function(...) {
    stop("Invalid operation on dates")
}
# Summary group generic: only min/max/range make sense for dates;
# the numeric result is coerced back to a date via as.date()
Summary.date <- function (..., na.rm = FALSE) {
    ok <- switch(.Generic, min = , max = , range = TRUE, FALSE)
    if (!ok)
        stop(paste(.Generic, "not defined for dates"))
    as.date(NextMethod(.Generic))
}

# Subscripting keeps the "date" class: strip it, delegate to the
# default method, then restore it on the result
"[.date" <- function(x, ..., drop = TRUE) {
    saved <- class(x)
    class(x) <- NULL
    val <- NextMethod("[")
    class(val) <- saved
    val
}
# Single-element extraction keeps the "date" class, same scheme as "["
"[[.date" <- function(x, ..., drop = TRUE) {
    saved <- class(x)
    class(x) <- NULL
    val <- NextMethod("[[")
    class(val) <- saved
    val
}

# Character representation of a date.  The user may name an alternative
# formatting function via options(print.date=); the default is the
# "ddmmmyy" form.
as.character.date <- function(x) {
    fmt <- options()$print.date
    if (is.null(fmt)) date.ddmmmyy(x)
    else get(fmt)(x)
}
# date columns behave like plain vectors in a data frame
as.data.frame.date <- as.data.frame.vector

# Convert a date to a plain vector.  Character-like modes go through
# the formatted character representation; numeric modes expose the
# underlying day count.
as.vector.date <- function(x, mode = "any") {
    if (mode %in% c("any", "character", "logical", "list"))
        as.vector(as.character(x), mode)
    else as.vector(unclass(x), mode)
}
    
# is.na() method for dates: defer to the default method on the
# underlying day counts
is.na.date <- function(x) {
    NextMethod(.Generic)
}
# Scatter plot with a date-formatted x axis.  Tick positions are chosen
# in whole days for short ranges, by pretty() for medium ranges, and at
# (approximate) year boundaries when the spacing exceeds ~half a year.
plot.date <- function(x, y, ..., axes, xaxt, xlab, ylab,
                      xlim = range(x, na.rm = TRUE),
                      ylim = range(y, na.rm = TRUE))
{
    if(missing(xlab))
        xlab <- deparse(substitute(x))
    if(missing(ylab))
        ylab <- deparse(substitute(y))
    class(x) <- NULL                    # after deparse(substitute())
    if(!missing(axes) && !axes)         # argument axes works
        plot(x, y, ..., axes = axes, xlab = xlab, ylab = ylab,
            xlim = xlim, ylim = ylim)
    else if(!missing(xaxt))
        # caller controls the x axis style directly
        plot(x, y, ..., xaxt = xaxt, xlab = xlab, ylab = ylab,
            xlim = xlim, ylim = ylim)
    else {
        # suppress the default axis and draw a date-labeled one below
        plot(x, y, ..., xaxt = "n", xlab = xlab, ylab = ylab,
            xlim = xlim, ylim = ylim)
        x <- c(x[!is.na(x)], xlim)      # draws axis completely when
                                        # using xlim
        xd <- date.mdy(x)
        ## get default for n from par("lab")
        temp <- pretty(x, n = par("lab")[1])
        delta <- temp[2] - temp[1]
        if(delta < 1)
            temp <- seq(min(x), max(x), 1)    # at least one tick per day
        else if(delta > 182) {
            # work in (fractional) years, then map back to julian days;
            # note the 365-day year approximation
            temp <- xd$year + (x - mdy.date(1, 1, xd$year))/365
            ## get default for n from par("lab")
            temp <- pretty(temp, n = par("lab")[1]) 
            temp <- mdy.date(1, 1, floor(temp)) + floor((temp %% 1) * 365)
        }
        axis(1, temp, as.character.date(temp), ...)
    }
}

# NOTE(review): this definition is dead code -- it is immediately
# replaced by the fuller print.date defined just below, which also
# honors the options(print.date=) hook.  Kept byte-for-byte.
print.date <- function(x, quote, prefix, ...) {
    if (missing(quote))
        quote <- FALSE
    invisible(print(as.character(x), quote = quote))
}

# Print a date in its display form.  The formatting function can be
# overridden via options(print.date=); the default is "ddmmmyy".
# Quotes are suppressed unless explicitly requested.
print.date <- function(x, quote, prefix, ...) {
    fmt <- options()$print.date
    x <- if (is.null(fmt)) date.ddmmmyy(x) else get(fmt)(x)
    if (missing(quote)) quote <- FALSE
    invisible(print(x, quote = quote))
}

# Summarize a date vector as its first and last values, formatted as
# character strings
summary.date <- function(object, ...) {
    out <- as.character(range(object))
    names(out) <- c("First ", "Last  ")
    out
}

mdy.date <- function(month, day, year, nineteen = TRUE, fillday = FALSE,
                     fillmonth = FALSE) {
    ## Convert month/day/year to a julian date centered a la SAS, i.e.
    ## 1 Jan 1960 is day 0.  Algorithm taken from Numerical Recipes,
    ## including the Gregorian-calendar correction applied to dates on
    ## or after 15 Oct 1582 (julian >= -137774 on this scale).
    julian.day <- function(m, d, yr) {
        ty <- ifelse(m > 2, yr, yr - 1)
        tm <- ifelse(m > 2, m + 1, m + 13)
        jul <- trunc(365.25 * ty) + trunc(30.6001 * tm) + d - 715940
        cent <- trunc(0.01 * ty)
        ifelse(jul >= -137774,
               jul + 2 + trunc(.25 * cent) - cent,
               jul)
    }

    # Warn about (and truncate) fractional inputs
    fractional <- any((month != trunc(month)) |
                      (day != trunc(day)) |
                      (year != trunc(year)))
    if (!is.na(fractional) && fractional) {
	warning("Non integer input values were truncated in mdy.date")
	month <- trunc(month)
	day <- trunc(day)
	year <- trunc(year)
    }
    if (nineteen)
        year <- ifelse(year < 100, year + 1900, year)

    ## Force input vectors to be the same length, but in a way that
    ## gives an error if their lengths aren't multiples of each other.
    pad <- numeric(length(month + day + year))
    month <- month + pad
    day   <- day + pad
    year  <- year + pad

    # Optional imputation of missing fields
    if (fillmonth) {
	unknown <- is.na(month)
	month[unknown] <- 7
	day[unknown] <- 1
	}
    if (fillday) day[is.na(day)] <- 15

    # Reject impossible field values; shift BC years (no year 0)
    month[month < 1 | month > 12] <- NA
    day[day < 1] <- NA
    year[year == 0] <- NA               # there is no year 0
    year <- ifelse(year < 0, year + 1, year)

    julian <- julian.day(month, day, year)

    ## Check for invalid days (31 Feb, etc.) by calculating the julian
    ## date of the first of the next month: a valid date must precede it
    nxt.year  <- ifelse(month == 12, year + 1, year)
    nxt.month <- ifelse(month == 12, 1, month + 1)
    julian2 <- julian.day(nxt.month, 1, nxt.year)

    result <- as.integer(ifelse(julian2 > julian, julian, NA))
    attr(result, "class") <- "date"
    result
}
date.mdy <- function(sdate, weekday = FALSE) {
    ## Invert a julian date (days since 1 Jan 1960) to month/day/year,
    ## optionally with a day-of-week index; algorithm from Numerical
    ## Recipes.
    attr(sdate, "class") <- NULL        # stop any propagation of methods
    jul <- sdate + 2436935              # shift to the Numerical Recipes base
    # Day-of-week index computed before the Gregorian adjustment
    # (1..7; exact weekday mapping inherited from the algorithm)
    wday <- as.integer((jul + 1) %% 7 + 1)
    # Gregorian calendar correction for dates on/after the changeover
    adj <- ((jul - 1867216) - .25) / 36524.25
    jul <- ifelse(jul >= 2299161,
                  trunc(jul + 1 + adj - trunc(.25 * adj)),
                  jul)
    jb <- jul + 1524
    jc <- trunc(6680 + ((jb - 2439870) - 122.1) / 365.25)
    jd <- trunc(365.25 * jc)
    je <- trunc((jb - jd) / 30.6001)
    day <- (jb - jd) - trunc(30.6001 * je)
    month <- as.integer(ifelse(je > 13, je - 13, je - 1))
    year  <- as.integer(ifelse(month > 2, jc - 4716, jc - 4715))
    year  <- as.integer(ifelse(year <= 0, year - 1, year))
    out <- list(month = month, day = day, year = year)
    if (weekday) out$weekday <- wday
    out
}

date.ddmmmyy <- function(sdate) {
    ## Format a julian date in "ddMmmyy" style (e.g. "4Oct97"), using a
    ## two-digit year for dates in the 1900s; NA dates become the string "NA".
    parts <- date.mdy(sdate)
    yr <- ifelse(floor(parts$year/100) == 19,
                 parts$year - 1900, parts$year)
    ifelse(is.na(sdate), "NA",
           paste(parts$day, month.abb[parts$month], yr, sep = ""))
}
date.mmddyy <- function(sdate, sep = "/") {
    ## Format a julian date as "m/d/yy" (two-digit year for the 1900s);
    ## NA dates become the string "NA".
    parts <- date.mdy(sdate)
    yr <- ifelse(floor(parts$year / 100) == 19,
                 parts$year - 1900, parts$year)
    ifelse(is.na(sdate), "NA",
           paste(parts$month, parts$day, yr, sep = sep))
}
date.mmddyyyy <- function(sdate, sep = "/") {
    ## Format a julian date as "m/d/yyyy" (full four-digit year);
    ## NA dates become the string "NA".
    parts <- date.mdy(sdate)
    ifelse(is.na(sdate), "NA",
           paste(parts$month, parts$day, parts$year, sep = sep))
}
.onLoad <- function(lib, pkg) {
    ## Package initialization.  Dynamic loading has moved to the NAMESPACE
    ## file, so all that remains is to set printing defaults -- without
    ## clobbering values the user has already chosen.
    ## "start" is compatible with previous R and with MASS.
    if (is.null(getOption("survfit.print.n"))) {
        options(survfit.print.n = "start")
    }
    ## TRUE is compatible with previous R/SPLUS (but is silly).
    if (is.null(getOption("survfit.print.mean"))) {
        options(survfit.print.mean = TRUE)
    }
}



# Test whether x is a factor ("category" in old S parlance).  The former
# test "inherits(x, 'factor') || is.factor(x)" was redundant: is.factor()
# is itself defined as inherits(x, "factor"), so one call suffices.
is.category <- function(x) is.factor(x)



labels.survreg <- function(object, ...) {
    ## Return the term labels stored directly on the object.
    ## NOTE(review): a later definition in this file replaces this one with
    ## a version that reads attr(object$terms, "term.labels") instead.
    attr(object, "term.labels")
}

# SCCS @(#)format.Surv.s	4.7 12/22/98
#
# format() method for Surv objects: render each observation as its
# character form, then delegate to the default format method.
format.Surv <- function(x, ...) {
    format(as.character.Surv(x), ...)
}

# The function to "make something suitable for inclusion in a data frame"
#   was "as.data.frame.x" in versions <5, now it is "data.frameAux.x",
#   so here we have a version specific definition.
# NOTE(review): this branches on the engine version at source time.  Under
#   R, version$major is a character string (e.g. "4"), so the comparison is
#   done after coercion and the "else" branch is the one taken -- confirm
#   if porting.

if (version$major >= 5) {
    data.frameAux.Surv <- function(x, ...) data.frameAux.AsIs(x, ...)
    } else {
    as.data.frame.Surv <- as.data.frame.model.matrix
    }
# SCCS @(#)frailty.brent.s	1.1 10/28/98
#
# Brent-style search for the maximizer of y = f(x), given the points
# evaluated so far.  The optional lower/upper bounds keep the bracketing
# phase from stepping outside the legal range.
#
frailty.brent <- function(x, y, lower, upper) {
    npt <- length(x)
    if (length(y) != npt) stop ("Length mismatch for x and y")

    # With fewer than 3 points we cannot interpolate: return the midpoint
    if (npt < 3) return(mean(x))

    # Sort the trial points and locate the maximum.  If the max is at an
    # edge the solution is not yet bracketed: take a big outward step.
    idx <- order(x)
    xs <- x[idx]
    ys <- y[idx]
    peak <- which(ys == max(y))
    if (length(peak) > 1) stop("Ties for max(y), I surrender") #fix this later

    if (peak == 1) {
        # Maximum at the left edge: step left, respecting any lower bound
        guess <- xs[1] - 3*(xs[2] - xs[1])
        if (!missing(lower) && !is.null(lower) && guess < lower)
            guess <- lower + (min(xs[xs > lower]) - lower)/10
        return(guess)
    }
    if (peak == npt) {
        # Maximum at the right edge: step right, respecting any upper bound
        guess <- xs[npt] + 3*(xs[npt] - xs[npt-1])
        if (!missing(upper) && !is.null(upper) && guess > upper)
            guess <- upper + (max(xs[xs < upper]) - upper)/10
        return(guess)
    }

    # Bracketed: quadratic extrapolation through the best point and its
    # two neighbors
    xs <- xs[(peak-1):(peak+1)]
    ys <- ys[(peak-1):(peak+1)]
    num <- (xs[2] - xs[1])^2 * (ys[2] - ys[3]) -
           (xs[2] - xs[3])^2 * (ys[2] - ys[1])
    den <- (xs[2] - xs[1])   * (ys[2] - ys[3]) -
           (xs[2] - xs[3])   * (ys[2] - ys[1])
    guess <- xs[2] - .5 * num/den

    # Fall back to a golden-section step when the parabolic guess leaves
    # the bracket, or the iteration is "bouncing around".  (The bounce
    # test deliberately uses the unsorted x, i.e. the evaluation order.)
    bouncing <- (npt > 4) && (guess - x[npt]) > .5*abs(x[npt-1] - x[npt-2])
    if (guess < xs[1] || guess > xs[3] || bouncing) {
        if ((xs[2] - xs[1]) > (xs[3] - xs[2]))
            return(xs[2] - .38*(xs[2] - xs[1]))
        else
            return(xs[2] + .32*(xs[3] - xs[2]))
    }
    guess
}
    
#   SCCS @(#)frailty.controlaic.s	1.3 01/14/99
# Control function to maximize the AIC (= loglik - df)
#  the optional parameter "caic" chooses the corrected aic (default=FALSE)
# n is the "effective" sample size
#
# Arguments:
#   parms : control options -- init (starting guesses), caic, eps,
#           trace, lower, upper (bounds passed on to frailty.brent)
#   iter  : outer iteration number; 0 requests a starting value
#   old   : list returned by the previous call (carries theta and history)
#   n     : effective sample size, used by the corrected AIC
#   df, loglik : degrees of freedom and Cox PL attained at old$theta
# Value: list(theta = next trial value, done = convergence flag,
#             history = matrix of (theta, loglik, df, aic, aicc) rows)
frailty.controlaic <- function(parms, iter, old, n, df, loglik) {
    if (iter==0) {  # initial call: hand back the first trial value
	if (is.null(parms$init)) theta <- 0.005
	else theta <- parms$init[1]
	return(list(theta=theta, done=FALSE))
	}

    # Use the corrected AIC only on request (default is the ordinary AIC)
    if (length(parms$caic)) correct <- parms$caic
    else correct <- FALSE

    # df penalty for the corrected AIC; the first formula avoids the
    # blow-up of the usual one when n <= df+2
    if (n < df+2) dfc <- (df -n) + (df+1)*df/2 -1  #avoid pathology
    else          dfc <- -1 + (df+1)/(1- ((df+2)/n))

    if (iter==1) { # Second guess in series
	history <- c(theta=old$theta, loglik=loglik,
		     df=df, aic=loglik-df, aicc=loglik - dfc)
	if (length(parms$init) <2) theta <-1
	else theta <- parms$init[2]
	return(list(theta=theta, done=FALSE, history=history))
	}

    history <- rbind(old$history, c(old$theta, loglik, df, loglik-df,
				    loglik -dfc))

    if (iter==2) {  # Third guess: split the difference of the first two
	theta <- mean(history[,1])
	return(list(theta=theta, done=FALSE, history=history))
	}

    # Ok, now we're ready to actually use prior data.
    # history has iter rows: theta, Cox PL, df, aic, corrected aic
    if (correct) aic <- history[,5]   #use corrected aic for convergence
    else         aic <- history[,4]

    done <- (abs(1- aic[iter]/aic[iter-1]) < parms$eps)
    x <- history[,1]

    # BUG FIX: the old test read "x[iter]== max(aic)", comparing a theta
    # value to an AIC value.  The intent (cf. the parallel test in
    # frailty.controlgam) is "best AIC so far occurred at the largest
    # theta tried" -- in which case double theta instead of interpolating.
    if (aic[iter]== max(aic) && x[iter]==max(x))
	    newtheta <- 2* max(x)
    else  newtheta <- frailty.brent(x, aic, lower=parms$lower,
				    upper=parms$upper)

    if (length(parms$trace) && parms$trace) {
	print(history)
	cat("    new theta=", format(newtheta), "\n\n")
	}
    list(theta=newtheta, done=done, history=history)
    }
        
# SCCS @(#)frailty.controldf.s	1.3 12/02/98
# A function to calibrate the df
#    very empirical  
# Find the closest 3 points that span the target value
#   We know the function is monotone, so fit the function
#     dy = a * (dx)^p   to the 3 points, where dx and dy are the distance
#     from the leftmost of the three points.
#   This method can fail near a boundary, so use step halving if things don't
#     go well
# On input, parms$df = target degrees of freedom
#           parms$dfs, parms$thetas = known values (usually 0,0)
#           parms$guess = first guess
#
# Arguments:
#   parms : control list -- df (target), thetas/dfs (known starting pairs),
#           guess (first trial theta), eps (tolerance, default .1),
#           trace (optional debug printing)
#   iter  : outer iteration number; 0 requests the starting value
#   old   : list returned by the previous call (theta, history, half)
#   df    : degrees of freedom attained at old$theta
# Value: list(theta = next trial value, done = convergence flag,
#        history = matrix of (theta, df) pairs,
#        half = count of successive bisection steps taken)
frailty.controldf <- function(parms, iter, old, df) {
    if (iter==0) {  
	# initial call: return the user's guess, seed the history with the
	# already-known (theta, df) pairs
	theta <- parms$guess
	return(list(theta=theta, done=FALSE, 
		    history=cbind(thetas=parms$thetas, dfs=parms$dfs)))
	}

    eps <- parms$eps
    if (length(eps)==0) eps <- .1    # default convergence tolerance

    # full set of (theta, df) pairs seen so far, latest one appended
    thetas <- c(old$history[,1], old$theta)
    dfs    <- c(old$history[,2], df)
    nx <- length(thetas)
    if (nx==2) {
	#linear guess based on first two 
	# but try extra hard to bracket the root
	theta <- thetas[1] + (thetas[2]-thetas[1])*(parms$df - dfs[1])/
						    (dfs[2] - dfs[1])
	if (parms$df > df) theta <- theta * 1.5 
	return(list(theta=theta, done=FALSE,
		    history=cbind(thetas=thetas, dfs=dfs), half=0))
	}
    else{
	# Now, thetas= our guesses at theta
	#  dfs = the degrees of freedom for each guess
	# converged when the latest df is within eps of the target
	done <- (iter>1 &&
		 (abs(dfs[nx]-parms$df) < eps))

	# look for a new minimum
	x <- thetas
	y <- dfs
	target <- parms$df

	# How am I doing: did the last step close most of the gap to the
	# target?  (ratio > .6 means less than 40% of the gap was closed)
	if ( abs( (y[nx]-target)/(y[nx-1]-target)) > .6) doing.well <- FALSE
	else doing.well <- TRUE
	
	# sort by theta; flip the sign of y (and the target) when the
	# relation is decreasing, so that below y is monotone increasing
	ord <- order(x)
	if ((x[1]-x[2])*(y[1]-y[2]) >0)  y <- y[ord]  #monotone up
	else  { #monotone down
	    y <- -1* y[ord]
	    target <- -target
	    }
	x <- x[ord]

	if (all(y>target)) b1 <- 1     #points 1:3 are the closest then
	else if (all(y<target)) b1 <- nx-2
	else {
	    b1 <- max((1:nx)[y <= target]) #this point below target, next above
	    if (!doing.well && old$half<2) {
		#try bisection, but at most 2 times in a row
		if (length(parms$trace) && parms$trace){
		    print(cbind(thetas=thetas, dfs=dfs))
		    cat("  bisect:new theta=" , format( mean(x[b1+0:1])), 
			"\n\n")
		    }
		return(list(theta= mean(x[b1+0:1]),done=done, 
			      history=cbind(thetas=thetas, dfs=dfs), 
				            half=old$half+1))
		}
	    # use either b1,b1+1,b1+2 or  b1-1, b1, b1+1, whichever is better
	    #  better = midpoint of interval close to the target

	    if ((b1+1)==nx ||
		(b1>1 &&  ((target -y[b1]) < (y[b1+1] -target))))
		    b1 <- b1-1
	    }

	#now have the best 3 points
	# fit them with a power curve anchored at the leftmost one
	b2 <- b1 + 1:2	
	xx <- log(x[b2] - x[b1])
	yy <- log(y[b2] - y[b1])
	power <- diff(yy)/diff(xx)
	a <- yy[1] - power*xx[1]
	# invert the fitted curve at the target to get the next trial theta
	newx <- (log(target -y[b1]) - a)/power
	if (length(parms$trace) && parms$trace){
	    print(cbind(thetas=thetas, dfs=dfs))
	    cat("  new theta=" , format(x[b1] + exp(newx)), "\n\n")
	    }
	list(theta=x[b1] + exp(newx), done=done, 
	     history=cbind(thetas=thetas, dfs=dfs), half=0)
	}
    }

# SCCS @(#)frailty.controlgam.s	1.1 10/28/98
#
# The control function for a single Gamma frailty term.
#
# Iterates theta (the frailty variance), maximizing the corrected loglik
# via frailty.brent once enough trial points are in hand.
# Arguments:
#   opt   : options -- theta (fixed value), init (starting guesses),
#           eps (tolerance), trace (debug printing)
#   iter  : outer iteration number; 0 requests a starting value
#   old   : list returned by the previous call
#   group : frailty grouping (vector, or a 0/1 indicator matrix)
#   status: event indicator, summed by group to get events per group
#   loglik: Cox partial likelihood attained at old$theta
# Value: list(theta, done, history, c.loglik = corrected loglik)
frailty.controlgam <- function(opt, iter, old, group, status, loglik){
    
    if (iter==0) {
	# initial call
	if (!is.null(opt$theta)) theta <- opt$theta  #fixed theta case
	else {
	    if (is.null(opt$init)) theta <- 0   #no initial value -- use 0
	    else theta <- opt$init[1]
	    }
	list(theta=theta)
        }
    
    else {
	if (is.null(opt$trace)) trace <-FALSE
	else trace <- opt$trace

	theta <- old$theta
	
	#compute correction to the loglik (the gamma integrated-likelihood
	# terms not captured by the penalty itself)
	if (theta==0) correct <- 0
	else {
	    # reduce an indicator matrix to a group-number vector
	    if (is.matrix(group)) group <- c(group %*% 1:ncol(group))
	    d <- tapply(status,group,sum)    # events per group
	    correct <- frailty.gammacon(d, 1/theta)
	    }

	if (!is.null(opt$theta)) # fixed theta case
	    list(theta=theta, done=TRUE, c.loglik=loglik + correct)
	else {
	    # save history of the iteration, and get the next theta
	    if (iter==1) history <- c(theta=theta, loglik=loglik, 
		  c.loglik=loglik + correct)
	    else history <- rbind(old$history, 
			 as.vector(c(theta, loglik, 
				     loglik + correct)))
	
	    if (iter==1) {
		# second trial value
		if (is.null(opt$init )) theta <-1
		else                    theta <- opt$init[2]
		list(theta=theta, done=FALSE, history=history,
		     c.loglik= loglik+correct)
	        }
	    else if (iter ==2) {
		# third trial value: split the difference unless the
		# second guess improved the corrected loglik by >= 1,
		# in which case keep moving outward
		if (history[2,3] < (history[1,3] +1)) 
			theta <- mean(history[1:2,1])
		else    theta <- 2*history[2,1]

                if (trace) {
		    print(history)
		    cat("    new theta=", theta, "\n\n")
		    }
		list(theta=theta, done=FALSE, history=history,
		     c.loglik= loglik+correct)
		}
	    else {
		#Now, history has iter rows, each row contains the value
	        # of theta, the Cox PL, and the full LL
	        done <- (abs(1- history[iter,3]/history[iter-1,3]) < opt$eps)
		x <- history[,1]
		y <- history[,3]

		# best value so far at the largest theta tried: double it;
		# otherwise Brent search on the sqrt scale (keeps theta >= 0)
		if (y[iter]== max(y) && x[iter]==max(x)) newtheta <- 2* max(x)
		else  newtheta <- frailty.brent(sqrt(x), y, lower=0)^2
                if (trace) {
		    print(history)
		    cat("    new theta=", format(newtheta), "\n\n")
		    }
		list(theta=newtheta, done=done, history=history, 
		     c.loglik = loglik + correct)
		}
	    }
        }
    }
	
# SCCS @(#)frailty.controlgauss.s	1.3 11/21/98
#
# The control function for REML on a gaussian
#
# Solves for the theta at which the REML-type residual
#     sum(fcoef^2)/(nfrail - trH/theta) - theta
# crosses zero.
# Arguments:
#   opt   : options -- theta (fixed value), init (starting values),
#           eps (tolerance on the residual), trace (debug printing)
#   iter  : outer iteration number; 0 requests a starting value
#   old   : list returned by the previous call (theta + history)
#   fcoef : current frailty coefficients
#   trH   : trace term from the fit (used in the residual above)
#   loglik: current loglik (accepted but not used in the update)
# Value: list(theta, done, history)
frailty.controlgauss <- function(opt, iter, old, fcoef, trH, loglik){
    if (iter==0) {
	# initial call
	#  Because of how the iteration works, 0 is not a useful trial value
	if (!is.null(opt$theta)) theta <- opt$theta  #fixed theta case
	else {
	    if (is.null(opt$init)) theta <- 1
	    else theta <- opt$init[1]
	    }
	list(theta=theta)
        }
    
    else {
	if (is.null(opt$trace)) trace <-FALSE
	else trace <- opt$trace

	nfrail <- length(fcoef)
	fsum   <- sum(fcoef^2)
	theta <- old$theta
	# the quantity driven to zero, evaluated at the current theta
	resid <- fsum/(nfrail - trH/theta) - theta

	# save history of the iteration, and get the next theta
	if (iter==1) {
	    history <- c(theta=theta, resid=resid, fsum=fsum, trace=trH)
	    if (is.null(opt$init )) {
		# step in the direction the residual's sign indicates
		if (resid>0)  theta <- theta*3
		else          theta <- theta/3  	        }
	    else theta <- opt$init[2]
	    list(theta=theta, done=FALSE, history=history)
	    }
	else {
            history <- rbind(old$history,
			     as.vector(c(theta, resid, fsum, trH)))
	    if (iter ==2) {
		# third guess: keep stepping outward until the root is
		# bracketed; once bracketed, bisect
		if (all(history[,2] > 0))     theta <- history[2,1]*2
		else if (all(history[,2] <0)) theta <- history[2,1]/2
		else    		theta <- mean(history[1:2,1])
                if (trace) {
		    print(history)
		    cat("    new theta=", theta, "\n\n")
		    }
		list(theta=theta, done=FALSE, history=history)
		}
	    else {
		done <- (abs(history[iter,2]) < opt$eps)
		ord <- order(history[,1])
		tempy <- history[ord,2]  #x & y from left to right
		tempx <- history[ord,1]
		# make sure we have one positve and one negative y value
		#  y must be positive near 0, and negative for large x
		if (all(tempy>0))  newtheta <- 2*max(tempx)
		else if (all(tempy<0)) newtheta <- .5 * min(tempx)
		else{##FIXME: should use uniroot().
		    #find the latest point, and one on each side of 0
		    b1 <- (1:iter)[ord==iter]
		    if (b1==1) b1 <-2
		    else if (b1==iter) b1 <- iter-1

		    # Brent's formula, straight from Numerical Recipies
		    guess <- history[iter- (2:0),1]
		    R <- tempy[b1]/ tempy[b1+1]
		    S <- tempy[b1]/ tempy[b1-1]
		    U <- R/S
		    P <- S* (U*(R-U)*(tempx[b1+1]-tempx[b1]) - 
			    (1-R)*(tempx[b1]-tempx[b1-1]))
		    Q <- (U-1)*(R-1)*(S-1)
		    newtheta <- tempx[b1] + P/Q
		    # if the new guess is outside the brackets, do a binomial
		    #   search step
		    if (newtheta > tempx[b1+1]) newtheta <- mean(tempx[b1+0:1])
		    if( newtheta < tempx[b1-1]) newtheta <- mean(tempx[b1-0:1])
		    }
                if (trace) {
		    print(history)
		    cat("    new theta=", format(newtheta), "\n\n")
		    }
		list(theta=newtheta, done=done, history=history)
		}
	    }
        }
    }
	
# SCCS @(#)frailty.gamma.s	1.4 06/10/00
# 
# Defining function for gamma frailty fits
#
# Builds a coxph.penalty term for a shared gamma frailty:
#   x      - the grouping variable (one random effect per level)
#   sparse - if TRUE, code x as a numeric group index (sparse computation);
#            otherwise as a factor with a full set of dummy columns
#   theta  - fixed value of the frailty variance (implies method "fixed")
#   df     - target degrees of freedom (implies method "df")
#   eps    - convergence tolerance for the outer iteration
#   method - how theta is chosen: "em", "aic", "df", or "fixed"
#   ...    - passed along in the control-parameter list (cparm)
# Returns x with the penalty attributes (pfun, printfun, cfun, ...) attached.
frailty.gamma <- function(x, sparse=(nclass >5), theta, df, eps= 1e-5, 
			  method=c("em", "aic", "df", "fixed"), ...) {
    nclass <- length(unique(x))
    if (sparse)	x <-as.numeric(as.factor(x))
    else{
	x <- as.factor(x)
	##attr(x,'contrasts') <- function(n,...) contr.treatment(n,F)
        attr(x,"contrasts")<-contr.treatment(nclass,contrasts=FALSE)
        }
    class(x) <- c("coxph.penalty",class(x))

    # Check for consistency of the arguments: an explicit theta implies
    # method "fixed", an explicit df implies "df"; the pairs cannot be mixed
    if (missing(method)) {
	if (!missing(theta)) {
	    method <- 'fixed'
	    if (!missing(df)) 
		    stop("Cannot give both a df and theta argument")
	    }
	else if (!missing(df)) method <- 'df'
	}
    method <- match.arg(method)
    if (method=='df' && missing(df)) stop("Method = df but no df argument")
    if (method=='fixed' && missing(theta))
	    stop("Method= fixed but no theta argument")
    if (method!='df' && !missing(df)) 
	    stop("Method is not df, but have a df argument")
    if (method !='fixed' && !missing(theta)) 
	    stop("Method is not 'fixed', but have a theta argument")

    # Penalty function for the random effects.  NOTE(review): "first" and
    # "second" are presumably the first and second derivative vectors used
    # by the penalized fitter -- confirm against the coxph fitting code.
    pfun<- function(coef, theta, ndeath){
	if (theta==0) list(recenter=0, penalty=0, flag=TRUE)
	else {
	      # recenter so that mean(exp(coef)) == 1
	      recenter <- log(mean(exp(coef)))
	      coef <- coef - recenter
	      nu <- 1/theta
	      list(recenter=recenter,
		   first=   (exp(coef) -1) * nu,
		   second=  exp(coef) * nu,
		   penalty= -sum(coef)*nu,   # The exp part sums to a constant
		   flag=FALSE)
	           }
	   }

    # Printing support: a single summary row plus a descriptive string
    printfun <- function(coef, var, var2, df, history) {
	if (!is.null(history$history)) 
	     theta <- history$history[nrow(history$history),1]
	else theta <- history$theta
	clog  <- history$c.loglik
	
	if (is.matrix(var)) test <- coxph.wtest(var, coef)$test
	else 		    test <- sum(coef^2/var)
	df2 <- max(df, .5)      # Stop silly p-values
	list(coef=c(NA, NA, NA, test, df, 1-pchisq(test, df2)),
		 history=paste("Variance of random effect=", format(theta),
	                       "  I-likelihood =", 
		         format(round(clog,1), digits=10)))
	}

    # Assemble the control information appropriate to the chosen method
    if (method=='fixed') {
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c("x", "status", "loglik"),		 
		     cfun = frailty.controlgam,
		     cparm= list(theta=theta, ...))
        }
    else if (method=='em'){
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c("x", "status", "loglik"),		 
		     cfun = frailty.controlgam,
		     cparm= c(list(eps=eps), ...))
	}
    
    else if (method=='aic') {
	# wrap frailty.controlaic so that the corrected loglik can be
	# computed and attached to its return value
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c("x", "status", "loglik", "neff","df", "plik"),
		     cparm=list(eps=eps, lower=0, init=c(.1, 1), ...),
		     cfun =function(opt, iter, old, group, status, loglik,...){
			 temp <- frailty.controlaic(opt, iter, old, ...)
			 if (iter >0) {
			     #compute correction to the loglik
			     if (old$theta==0) correct <- 0
			     else {
				 if (is.matrix(group)) 
					 group <-c(group %*% 1:ncol(group))
				 d <- tapply(status,group,sum)
				 correct <- frailty.gammacon(d, 1/old$theta)
				 }
			     temp$c.loglik <- loglik + correct
			     }
			 temp
			 })
	}
    else {  #df method
	# The initial guess is based on the observation that theta=1 often
	#   gives about df= (#groups)/3
	if (missing(eps)) eps <- .1
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs= c('df', "x", "status", "loglik"),
		     cparm=list(df=df, thetas=0, dfs=0, eps=eps,
		                guess=3*df/length(unclass(x)), ...),
		     cfun =function(opt, iter, old, df, group, status, loglik){
			 temp <- frailty.controldf(opt, iter, old, df)
			 if (iter >0) {
			     #compute correction to the loglik
			     if (old$theta==0) correct <- 0
			     else {
				 if (is.matrix(group)) 
					 group <-c(group %*% 1:ncol(group))
				 d <- tapply(status,group,sum)
				 correct <- frailty.gammacon(d, 1/old$theta)
				 }
			     temp$c.loglik <- loglik + correct
			     }

			 temp
		         })
	}

    # If not sparse, give shorter names to the coefficients, so that any
    #   printout of them is readable.
    if (!sparse) {
	vname <- paste("gamma", levels(x), sep=':')
	temp <- c(temp, list(varname=vname))
	}
    attributes(x) <- c(attributes(x), temp)
    x
    }

			  
			   
			   
#  SCCS @(#)frailty.gammacon.s	1.2 02/21/99
# Likelihood correction term for a gamma frailty: the penalty function only
# adds nu*sum(w_j) to the Cox PL, so this supplies the remaining terms of
# the integrated gamma likelihood (section 3.5 of the report; a bit more
# than equation 15).
#   d  = number of events in each frailty group
#   nu = 1/theta, the gamma shape parameter
frailty.gammacon <- function(d, nu) {
    dmax <- max(d)

    # First term; when nu is huge the log would lose all precision, so a
    # second-order Taylor series is used instead
    if (nu > 1e7*dmax) part1 <- sum(d*d)/nu
    else               part1 <- sum(d + nu*log(nu/(nu+d)))

    # Second term, built for all groups at once via tabulation: each group
    # with d events contributes log[ nu(nu+1)...(nu+d-1) / (nu+d)^d ]
    counts  <- table(factor(d[d>0], levels=1:dmax)) # groups with exactly k events
    atleast <- rev(cumsum(rev(counts)))             # groups with >= k events
    k <- 1:dmax
    numer <- nu + rep(k - 1, atleast)
    denom <- nu + rep(k, counts*k)
    part1 + sum(log(numer/denom))
}
   
# SCCS @(#)frailty.gaussian.s	1.5 05/30/00
# 
# Defining function for gaussian frailty fits
#
# Builds a coxph.penalty term for a shared gaussian random effect:
#   x      - the grouping variable (one random effect per level)
#   sparse - if TRUE, code x as a numeric group index (sparse computation);
#            otherwise as a factor with a full set of dummy columns
#   theta  - fixed variance (implies method "fixed")
#   df     - target degrees of freedom (implies method "df"; df==0 -> "aic")
#   method - "reml", "aic", "df", or "fixed"
#   ...    - passed along in the control-parameter list (cparm)
# Returns x with the penalty attributes attached.
frailty.gaussian <- function(x, sparse=(nclass >5), theta, df, 
		   method=c("reml", "aic", "df", "fixed"), ...) {

    nclass <- length(unique(x))
    # Check for consistency of the arguments
    if (missing(method)) {
	if (!missing(theta)) {
	    method <- 'fixed'
	    if (!missing(df)) 
		    stop("Cannot give both a df and theta argument")
	    }
	else if (!missing(df)) {
	    if (df==0) method <- "aic"
	    else       method <- 'df'
	    }
	}
    method <- match.arg(method)
    if (method=='df' && missing(df)) stop("Method = df but no df argument")
    if (method=='fixed' && missing(theta))
	    stop("Method= fixed but no theta argument")
    if (method !='fixed' && !missing(theta)) 
	    stop("Method is not 'fixed', but have a theta argument")

    if (sparse){
	x <-as.numeric(as.factor(x))
	class(x) <- "coxph.penalty"
	}
    else{
	x <- as.factor(x)
	class(x) <- c("coxph.penalty",class(x))
	attr(x,'contrasts') <- contr.treatment(nclass,contrasts=FALSE)
        }
    if (!missing(theta) & !missing(df)) 
	    stop("Cannot give both a df and theta argument")

    # Penalty function: gaussian negative log-density of the random effects.
    # NOTE(review): "first"/"second" are presumably the first and second
    # derivative vectors used by the penalized fitter -- confirm there.
    pfun<- function(coef, theta, ndead){
	if (theta==0) list(recenter=0, penalty=0, flag=TRUE)
	  else {
	      # recenter so that mean(coef) == 0
	      recenter <- mean(coef)
	      coef <- coef - recenter
	      list(recenter = recenter,
		   first=   coef/theta,
		   second=  rep(1, length(coef))/theta,
#		   penalty= -sum(log(dnorm(coef,0, sqrt(theta))),
                   penalty= 0.5* sum(coef^2/theta + log(2*pi*theta)),
		   flag=FALSE)
	           }
	   }

     # Printing support: a single summary row plus a descriptive string
     printfun <- function(coef, var, var2, df, history) {
	if (!is.null(history$history)) 
	     theta <- history$history[nrow(history$history),1]
	else theta <- history$theta
		
	if (is.matrix(var)) test <- coxph.wtest(var, coef)$test
	else 		    test <- sum(coef^2/var)
	df2 <- max(df, .5)      # Stop silly p-values
	list(coef=c(NA, NA, NA, test, df, 1-pchisq(test, df2)),
		 history=paste("Variance of random effect=", format(theta)))
	}

   # Assemble the control information appropriate to the chosen method
   if (method=='reml') {
	temp <- list(pfun=pfun, 
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c('coef', 'trH', 'loglik'),
		     cfun = frailty.controlgauss,
		     cparm= list( ...))
	}
    else if (method=='fixed') {
	temp <- list(pfun=pfun,
		     printfun = printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cfun = function(parms, iter, old){
		          list(theta=parms$theta, done=TRUE)},
		     cparm= list(theta=theta, ...))
        }
    else if (method=='aic') {
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c("neff", "df", "plik"),	
		     cparm=list(lower=0, init=c(.1,1), ...),
		     cfun = frailty.controlaic)
	}
    else {  #df method
	temp <- list(pfun=pfun,
		     printfun =printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs=('df'),
		     cparm=list(df=df, thetas=0, dfs=0,
		                guess=3*df/length(unclass(x)), ...),
                     cfun = frailty.controldf)
	}

    # If not sparse, give shorter names to the coefficients, so that any
    #   printout of them is readable.
    if (!sparse) {
	vname <- paste("gauss", levels(x), sep=':')
	temp <- c(temp, list(varname=vname))
	}
    attributes(x) <- c(attributes(x), temp)
    x
    }

			  
			   
			   
# SCCS @(#)frailty.s	1.2 01/14/99
# 
# Generic frailty term: resolve the distribution name (partial matching
# against the known choices) and hand off to the matching
# frailty.<distribution> workhorse function.
#
frailty <- function(x, distribution = 'gamma', ...) {
    known <- c("gamma", "gaussian", "t")
    hit <- pmatch(distribution, known)
    if (!is.na(hit)) distribution <- known[hit]

    fname <- paste("frailty", distribution, sep='.')
    if (!exists(fname))
	    stop(paste("Function '", fname, "' not found", sep=""))
    (get(fname))(x, ...)
    }

			  
			   
			   
# SCCS @(#)frailty.t.s	1.4 07/10/00
# 
# Defining function for t-distribution frailty fits
#
# Builds a coxph.penalty term whose random effects follow a t distribution:
#   x      - the grouping variable (one random effect per level)
#   sparse - if TRUE, code x as a numeric group index (sparse computation);
#            otherwise as a factor with a full set of dummy columns
#   theta  - fixed variance (implies method "fixed")
#   df     - target degrees of freedom (implies method "df"; df==0 -> "aic")
#   eps    - convergence tolerance for the outer iteration
#   tdf    - degrees of freedom of the t density itself (must be > 2 so
#            the variance exists)
#   method - "aic", "df", or "fixed"
# Returns x with the penalty attributes attached.
frailty.t <- function(x, sparse=(nclass>5), theta, df, eps= 1e-5,  tdf=5,
			  method=c("aic", "df", "fixed"), ...) {
    nclass <- length(unique(x))
    if (sparse){
	x <-as.numeric(as.factor(x))
	class(x) <- "coxph.penalty"
        }
    else{
	x <- as.factor(x)
	class(x) <- c("coxph.penalty",class(x))
	attr(x,'contrasts') <- contr.treatment(nclass,contrasts=FALSE)
        }

    if (tdf <=2) stop("Cannot have df <3 for the t-frailty")
    # Check for consistency of the arguments
    if (missing(method)) {
	if (!missing(theta)) {
	    method <- 'fixed'
	    if (!missing(df)) 
		    stop("Cannot give both a df and theta argument")

	    }
	else if (!missing(df)) {
	    if (df==0) method <- 'aic'
	    else       method <- 'df'
	    }
	}
    method <- match.arg(method)
    if (method=='df' && missing(df)) stop("Method = df but no df argument")
    if (method=='fixed' && missing(theta))
	    stop("Method= fixed but no theta argument")
    if (method !='fixed' && !missing(theta)) 
	    stop("Method is not 'fixed', but have a theta argument")

    # Penalty function: negative log t-density of the random effects,
    # scaled so that theta is the variance.  NOTE(review): "first"/"second"
    # are presumably the derivative vectors used by the penalized fitter.
    pfun<- function(coef, theta, ndead, tdf){
	if (theta==0) list(recenter=0, penalty=0, flag=TRUE)
	else {
	    sig <- theta* (tdf-2)/tdf  #scale contant^2 in density formula
	    #
	    # Find the centering constant, using 1 NR step
	    #
	    temp  <- 1 + coef^2/(tdf*sig)
	    temp1 <- coef/temp
	    temp2 <- 1/temp - (2/(tdf*sig))*coef^2/temp^2
	    recenter <- sum(temp1)/sum(temp2)  #NR step towards MLE

	    coef <- coef - recenter
	    const <- (tdf+1)/(tdf*sig)
	    temp  <- 1 + coef^2/(tdf*sig)
	    list(recenter=recenter, 
		 first=   const*coef/temp,
		 second=  const*(1/temp - (2/(tdf*sig))*coef^2/temp^2),
		 penalty= sum(.5*log(pi*tdf*sig) + ((tdf+1)/2)*log(temp) +
		                lgamma(tdf/2) - lgamma((tdf+1)/2)),
		 flag=FALSE)
	    }
	}

    # Printing support: a single summary row plus a descriptive string
    printfun <- function(coef, var, var2, df, history) {
	if (!is.null(history$history)) 
	     theta <- history$history[nrow(history$history),1]
	else theta <- history$theta
	
	if (is.matrix(var)) test <- coxph.wtest(var, coef)$test
	else 		    test <- sum(coef^2/var)
	df2 <- max(df, .5)      # Stop silly p-values
	list(coef=c(NA, NA, NA, test, df, 1-pchisq(test, df2)),
		 history=paste("Variance of random effect=", format(theta)))
	}

    # Assemble the control information appropriate to the chosen method
    if (method=='fixed') {
	temp <- list(pfun=pfun, pparm=tdf,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cfun = function(parms, iter, old){
		          list(theta=parms$theta, done=TRUE)},
		     cparm= list(theta=theta, ...))
        }
    
    else if (method=='aic') {
	temp <- list(pfun=pfun, pparm=tdf,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs = c("neff", "df", "plik"),	
		     cparm=list(lower=0, init=c(.1,1), eps=eps, ...),
		     cfun = frailty.controlaic)
	}
    else {  #df method
	if (missing(eps)) eps <- .1
	temp <- list(pfun=pfun, pparm=tdf,
		     printfun=printfun,
		     diag =TRUE,
		     sparse= sparse,
		     cargs= c('df'),
		     cparm=list(df=df, eps=eps, thetas=0, dfs=0,
		                guess=3*df/length(unclass(x)), ...),
                     cfun = frailty.controldf)
	}

    # If not sparse, give shorter names to the coefficients, so that any
    #   printout of them is readable.
    if (!sparse) {
	vname <- paste("t", levels(x), sep=':')
	temp <- c(temp, list(varname=vname))
	}
    attributes(x) <- c(attributes(x), temp)
    x
    }

			  
			   
			   
#  SCCS  @(#)is.na.coxph.penalty.s	1.4 02/21/99
# The subscript function for coxph.penalty objects
#  without it the "subset" arg of a model statement tosses
#  away all of the attributes
#
"[.coxph.penalty" <- function(x, ..., drop=FALSE) {
    # Temporarily strip every attribute except dim/dimnames so the default
    # subscript method runs (and correctly updates those two) without
    # dispatching back here on the class
    attlist <- attributes(x)
    attributes(x) <- attlist[match(c('dim', 'dimnames'), names(attlist), 0)] 
    x <- NextMethod('[')  #let the default method do actual subscripting

    # Tack back on all of the old attributes except dim and dimnames,
    #   which will have been properly modified by the standard [ method
    attributes(x) <- c(attributes(x),
                       attlist[is.na(match(names(attlist),
                                           c("dim", "dimnames")))])
    return(x)
}
			  

is.na.coxph.penalty <- function(x) {
    ## NA detection for coxph.penalty terms: strip the class first so the
    ## default is.na applies.  A matrix term is flagged NA in any row with
    ## an NA entry, found via the row sums (NA propagates through +).
    bare <- unclass(x)
    if (is.matrix(bare)) {
        is.na(c(bare %*% rep(1, ncol(bare))))
    } else {
        is.na(bare)
    }
}
#
# SCCS @(#)is.ratetable.s	4.6 08/01/98
#
# Test whether x is a well-formed ratetable: it must carry mutually
# consistent dim, dimnames, dimid, factor, and cutpoints attributes.
# With verbose=TRUE a character vector of complaints is returned instead
# of FALSE (TRUE if there are none) -- useful for debugging a hand-built
# table.  The verbose path previously crashed on several of the very
# malformations it was meant to report; see the BUG FIX notes below.
#
is.ratetable <- function(x, verbose=FALSE) {
    if (!verbose) {
	if (!inherits(x, 'ratetable')) return(FALSE)
	att <- attributes(x)
	if (any(is.na(match(c("dim", "dimnames", "dimid",
                              "factor", "cutpoints"),
			    names(att)))))
            return(FALSE)
	nd <- length(att$dim)
	if (length(x) != prod(att$dim))
            return(FALSE)
	if (!(is.list(att$dimnames) && is.list(att$cutpoints)))
		 return(FALSE)
	if (length(att$dimnames)!=nd || length(att$factor)!=nd ||
			 length(att$cutpoints)!=nd)
            return(FALSE)
	fac <- as.numeric(att$factor)
	if (any(is.na(fac)))
            return(FALSE)
	if (any(fac <0))
            return(FALSE)
	# per-dimension checks; seq_len avoids the 1:0 trap when nd==0
	for (i in seq_len(nd)) {
	    n <- att$dim[i]
	    if (length(att$dimnames[[i]]) !=n)
                return(FALSE)
	    if (fac[i]!=1 && length(att$cutpoints[[i]])!=n)
                return(FALSE)
	    if (fac[i]!=1 && any(order(att$cutpoints[[i]])!= seq_len(n)))
                return(FALSE)
	    if (fac[i]==1 && !is.null(att$cutpoints[[i]]))
                return(FALSE)
	    if (fac[i]>1 && i<nd)
                return(FALSE)
	    }
	return(TRUE)
    }

###verbose return messages, useful for debugging
    msg <- NULL
    if (!inherits(x, 'ratetable')) msg <- c(msg, "wrong class")
    att <- attributes(x)
    if (any(is.na(match(c("dim", "dimnames", "dimid", "factor", "cutpoints"),
			names(att))))) msg <- c(msg, 'missing an attribute')
    # BUG FIX: every check below indexes these four attributes; the old
    # code fell through and crashed with "subscript out of bounds" when
    # one was absent, instead of reporting what it had found
    if (is.null(att$dim) || is.null(att$dimnames) ||
        is.null(att$factor) || is.null(att$cutpoints)) return(msg)

    nd <- length(att$dim)
    if (length(x) != prod(att$dim)) msg <- c(msg, 'dims dont match length')
    if (!(is.list(att$dimnames) && is.list(att$cutpoints)))
	     msg <- c(msg, 'dimnames or cutpoints not a list')
    sizes.ok <- (length(att$dimnames)==nd && length(att$factor)==nd &&
		 length(att$cutpoints)==nd)
    if (!sizes.ok) msg <- c(msg, 'bad lengths')
    fac <- as.numeric(att$factor)
    if (any(is.na(fac))) msg <- c(msg, 'a missing factor')
    # BUG FIX: any(fac < 0) is NA (and so crashed the if) when fac has NAs
    if (any(fac < 0, na.rm=TRUE)) msg <- c(msg, 'factor <0')
    # Run the per-dimension checks only when they cannot themselves crash
    # (lengths consistent, no NA factors); seq_len handles nd==0 safely
    if (sizes.ok && !any(is.na(fac))) for (i in seq_len(nd)) {
	n <- att$dim[i]
	if (length(att$dimnames[[i]]) !=n) 
		msg <- c(msg, 'dimnames wrong length')
	if (fac[i]!=1 && length(att$cutpoints[[i]])!=n) 
		msg <- c(msg, 'cutpnt missing')
	if (fac[i]!=1 && any(order(att$cutpoints[[i]])!= seq_len(n))) 
		msg <- c(msg, 'unsorted cutpoints')
	if (fac[i]==1 && !is.null(att$cutpoints[[i]]))  
		msg <- c(msg, 'cutpnt should be null')
	if (fac[i]>1 && i<nd) 
		msg <- c(msg, 'only the last dim can be interpolated')
	}
    if (length(msg)==0)
        TRUE
    else
        msg
    }
# SCCS @(#)labels.survreg.s	1.1 01/06/99
#labels.survreg <- labels.lm
# Return the term labels of a fitted survreg model (its covariate names).
labels.survreg <- function(object, ...) {
    attr(object$terms, "term.labels")
}
# SCCS @(#)lines.survfit.s	4.16  01/14/99
# Add the curves of a survfit object to an existing plot.
#   type:      's' = step function (default); anything else is passed to lines()
#   mark:      plotting character(s) for tick marks
#   mark.time: TRUE = mark censoring times, FALSE = none, or a numeric
#              vector of times at which to place marks
#   xscale:    divisor for the time axis (e.g. 365.25 for days -> years)
#   firstx, firsty: starting point of each curve
#   xmax:      prune the curves at this time value
#   fun:       optional transform of the curves ('event', 'cumhaz', ...)
#   conf.int:  TRUE/FALSE, or 'only' to draw just the confidence bands
# Returns, invisibly, the endpoint of each curve (useful for labels).
#
# Fixes in this revision:
#   - conf.int='only' now suppresses the survival curve (plot.surv was
#     erroneously set to TRUE, making 'only' behave like TRUE)
#   - 'type==' no-op comparison replaced by the intended assignment
#   - n.event is taken from x (the survfit object); the old code used
#     ssurv$n.event, and '$' on a numeric vector/matrix is an error
#   - the mark-placement loop no longer clobbers the curve counter k
#   - confidence bands are only attempted when x carries them, matching
#     plot.survfit
lines.survfit <- function(x, type='s', mark=3, col=1, lty=1, lwd=1,
			  mark.time =TRUE, xscale=1,
			  firstx=0, firsty=1, xmax, fun,
			  conf.int=FALSE, ...) {

    # Expected-survival and Cox-model curves default to an unbroken line
    #   and no censoring marks
    if (inherits(x, 'survexp')) {
	if (missing(type)) type <- 'l'
	if (!is.numeric(mark.time)) mark.time <- FALSE
	}
    if (inherits(x, 'survfit.coxph')) {
	if (!is.numeric(mark.time)) mark.time <- FALSE
	}

    plot.surv <- TRUE
    if (is.character(conf.int)) {
	if (conf.int=='only') {
	    conf.int <- TRUE
	    plot.surv <- FALSE   # bands only, no survival curve
	    }
	else stop("Unrecognized option for conf.int")
	}

    if (is.numeric(mark.time)) mark.time<- sort(unique(mark.time[mark.time>0]))

    # A matrix of survival values gives several curves per stratum
    if (is.matrix(x$surv)) {
	ncol.per.strat <- ncol(x$surv)
	ncurve <- ncol(x$surv)
	coffset <- nrow(x$surv)*(1:ncurve -1)     #within matrix offset
        }
    else {
	ncol.per.strat <- 1
	ncurve <- 1
	coffset <- 0
        }

    if (is.null(x$strata)) {
	nstrat <- 1
	stemp <- rep(1, length(x$time))
	}
    else {
	nstrat <- length(x$strata)
	ncurve <- ncurve * nstrat
	stemp <- rep(1:nstrat, x$strata)
	}

    ssurv <- x$surv
    stime <- x$time
    supper <- x$upper
    slower <- x$lower
    if (!missing(xmax) && any(x$time>xmax)) {
	# prune back the survival curves
	# I need to replace x's over the limit with xmax, and y's over the
	#  limit with either the prior y value or firsty
	keepx <- keepy <- NULL  # lines to keep
	yzero <- NULL           # if all points on a curve are < xmax
	tempn <- table(stemp)
	offset <- cumsum(c(0, tempn))
	for (i in 1:nstrat) {
	    ttime <- stime[stemp==i]
	    if (all(ttime <= xmax)) {
		keepx <- c(keepx, 1:tempn[i] + offset[i])
		keepy <- c(keepy, 1:tempn[i] + offset[i])
		}
	    else {
		bad <- min((1:tempn[i])[ttime>xmax])
		if (bad==1)  {
		    keepy <- c(keepy, 1+offset[i])
		    yzero <- c(yzero, 1+offset[i])
		    }
		else  keepy<- c(keepy, c(1:(bad-1), bad-1) + offset[i])
		keepx <- c(keepx, (1:bad)+offset[i])
		stime[bad+offset[i]] <- xmax
		x$n.event[bad+offset[i]] <- 1   #don't plot a tick mark
		}
	    }

	# ok, now actually prune it
	stime <- stime[keepx]
	stemp <- stemp[keepx]
	x$n.event <- x$n.event[keepx]
	if (is.matrix(ssurv)) {
	    if (length(yzero)) ssurv[yzero,] <- firsty
	    ssurv <- ssurv[keepy,,drop=FALSE]
	    if (!is.null(supper)) {
		if (length(yzero)) supper[yzero,] <- slower[yzero,] <- firsty
		supper <- supper[keepy,,drop=FALSE]
		slower <- slower[keepy,,drop=FALSE]
		}
	    }
	else {
	    if (length(yzero)) ssurv[yzero] <- firsty
	    ssurv <- ssurv[keepy]
	    if (!is.null(supper)) {
		if (length(yzero)) supper[yzero] <- slower[yzero] <- firsty
		supper <- supper[keepy]
		slower <- slower[keepy]
		}
	    }
	}
    stime <- stime/xscale

    # Optional transform of the curves (applied to CI limits and firsty too)
    if (!missing(fun)) {
	if (is.character(fun)) {
	    tfun <- switch(fun,
                           'log' = function(x) x,
                           'event'=function(x) 1-x,
                           'cumhaz'=function(x) -log(x),
                           'cloglog'=function(x) log(-log(x)),
                           'pct' = function(x) x*100,
                           'logpct'= function(x) 100*x,
                           stop("Unrecognized function argument")
                           )
        }
	else if (is.function(fun)) tfun <- fun
	else stop("Invalid 'fun' argument")

	ssurv <- tfun(ssurv)
	if (!is.null(supper)) {
	    supper <- tfun(supper)
	    slower <- tfun(slower)
	    }
	firsty <- tfun(firsty)
        }

    strata <- table(stemp)
    soffset<- ncol.per.strat * c(0, cumsum(strata))
    mark <- rep(mark, length=ncurve)
    col  <- rep(col , length=ncurve)
    lty  <- rep(lty , length=ncurve)
    lwd  <- rep(lwd , length=ncurve)
    time <- rep(stime, ncol.per.strat)

    if (type=='s') {
	type <- 'l'     # dostep builds the steps; draw them as plain lines
	dostep <- function(x,y) {
	    n <- length(x)
	    if (n >2) {
		# replace verbose horizontal sequences like
		# (1, .2), (1.4, .2), (1.8, .2), (2.3, .2), (2.9, .2), (3, .1)
		# with (1, .2), (3, .1).  They are slow, and can smear the
		# looks of the line type.
		dupy <- c(TRUE, diff(y[-n]) !=0, TRUE)
		n2 <- sum(dupy)

		#create a step function
		xrep <- rep(x[dupy], c(1, rep(2, n2-1)))
		yrep <- rep(y[dupy], c(rep(2, n2-1), 1))
		list(x=xrep, y=yrep)
		}
	    else if (n==1) list(x=x, y=y)
	    else  list(x=x[c(1,2,2)], y=y[c(1,1,2)])
	    }
	}
    else dostep <- function(x,y) list(x=x, y=y)

    k <- 0                   # curve counter: indexes mark/col/lty/lwd
    xend <- yend <- NULL
    for (i in 1:nstrat) {
      for (j in 1:ncol.per.strat) {
	k <- k +1
	who <- seq(soffset[i]+ coffset[j]+1, length=strata[i])
	if (is.finite(firstx) && is.finite(firsty)) {
	    xx <- c(firstx, time[who])
	    yy <- c(firsty, ssurv[who])
	    yyu<- c(firsty, supper[who])
	    yyl<- c(firsty, slower[who])
	    # n.event lives on the survfit object itself
	    # NOTE(review): for matrix curves 'who' can exceed the length of
	    #   n.event; marks are only reliable for vector curves -- confirm
	    deaths <- c(-1, x$n.event[who])
	    }
	else {
	    xx <- time[who]
	    yy <- ssurv[who]
	    yyu<- supper[who]
	    yyl<- slower[who]
	    deaths <- x$n.event[who]
	    }
	nn <- length(xx)

	if (conf.int && !is.null(supper)) {
	    lines(dostep(xx,yyl), type=type, col=col[k],
		  lty=lty[k], lwd=lwd[k], ...)
	    lines(dostep(xx, yyu), type=type, col=col[k],
		  lty=lty[k], lwd=lwd[k], ...)
	    }

	xend <- c(xend,max(xx))
	yend <- c(yend,min(yy))
	if (plot.surv) {
	    lines(dostep(xx, yy), type=type, col=col[k],
		      lty=lty[k], lwd=lwd[k], ...)
	    if (is.numeric(mark.time)) {
		# place marks at the requested times; use m so that the
		#   curve counter k is left untouched
		indx <- mark.time
		for (m in seq(along=mark.time))
			indx[m] <- sum(mark.time[m] > xx)
		points(mark.time[indx<nn], yy[indx[indx<nn]],
		       pch=mark[k],col=col[k], ...)
		}
	    else if (mark.time==TRUE) {
		# tick marks at censoring times (n.event==0)
		if ( any(deaths==0))
			points(xx[deaths==0], yy[deaths==0],
				   pch=mark[k],col=col[k], ...)
		}
	    }
	}
      }
    invisible(list(x=xend, y=yend))
    }
# SCCS @(#)match.ratetable.s	4.5 11/18/97
# Do a set of error checks on whether the ratetable() vars match the
#   actual ratetable
# This is called by pyears and survexp, but not by users
#
# Returns a subscripting vector and a call
#
# Check the variables of a ratetable() model term against an actual rate
#   table and build the subscripting information for it.
#  R:         the matrix built from the ratetable() arguments, one column
#             per argument; its attributes carry argument names, levels,
#             and which arguments were constants
#  ratetable: the table of event rates itself
# Returns list(R, call [, summ]): R in the table's dimension order with
#   constant columns removed; 'call', a text fragment for subscripting the
#   rate table (NULL when no constants); and, when the table supplies a
#   'summary' attribute, the result of that function applied to R.
match.ratetable <- function(R, ratetable) {
    attR <- attributes(R)
    attributes(R) <- attR['dim']     #other attrs get in the way later
    if (!is.ratetable(ratetable)) stop("Invalid rate table")
    # match the ratetable() arguments to the table's dimensions, by name;
    #   the user may give the arguments in any order
    dimid <- attr(ratetable, 'dimid')
    ord <- match(attR$dimnames[[2]], dimid)
    if (any(is.na(ord)))
       stop(paste("Argument '", (attR$dimnames[[2]])[is.na(ord)],
	    "' in ratetable()",
	    " does not match the given table of event rates", sep=''))
    nd <- length(ord)
    if (nd != length(dimid))
	stop("The ratetable() call has the wrong number of arguments")
    ord[ord] <- 1:nd   #reverse the index, so "ord" can be on the right-hand
    R <- R[,ord,drop=FALSE]   # columns of R now follow the table's dim order

    # Check out the dimensions of R --
    const <- attR[["constants"]][ord]   # args that were given as constants
    call <- "ratetable["                # text of a subscript expression
    levlist <- attR[['levlist']][ord]   # level names for factor-like args
    dtemp <-dimnames(ratetable)
    efac  <- attr(ratetable, 'factor')  # per-dim flag; 1 = factor dimension
    for (i in (1:nd)) {
	if (const[i]) {   #user put in a constant
	    # the constant may be a level name, or a numeric level index
	    temp <- match(levlist[[i]], dtemp[[i]])
	    if (is.na(temp)) {
		temp <- as.numeric(levlist[[i]])
		if (is.na(temp))
		       stop(paste("Invalid value in ratetable() for variable",
				 dimid[i]))
		if (efac[i]==1) {  # this level is a factor
		    # a numeric index must be a whole number within range
		    if (temp<=0 || temp!=floor(temp) || temp >length(dtemp[[i]]))
		       stop(paste("Invalid value in ratetable() for variable",
				 dimid[i]))
		    }
		else stop(paste("Invalid value in ratetable() for variable",
					dimid[i]))
		}
	    R[,i] <- temp
	    call <- paste(call, temp)
	    }
	else if (length(levlist[[i]]) >0) {  #factor or character variable
	    if (efac[i]!=1) stop(paste("In ratetable(),", dimid[i],
				     "must be a continuous variable"))
	    # recode the user's factor levels to the table's level order
	    temp <- match(levlist[[i]], dtemp[[i]])
	    if (any(is.na(temp)))
		stop(paste("Levels do not match for ratetable() variable",
			    dimid[i]))
	    R[,i] <- temp[R[,i]]
	    }
	else {   # ratetable() thinks it is a continuous variable
	    if (efac[i]==1) {   #but it's not-- make sure it is an integer
		temp <- R[,i]
		if (any(floor(temp)!=temp) || any(temp<=0) ||
			    max(temp) > length(dtemp[[i]]))
		stop(paste("In ratetable(),",dimid[i],"is out of range"))
		}
	    }
	if (i==nd) call <- paste(call, "]")
	else       call <- paste(call, ",")
	}

    summ <- attr(ratetable, 'summary')
    if (is.null(summ))
	 list(R= R[,!const, drop=FALSE], call={if(any(const)) call else NULL})
    else list(R= R[,!const, drop=FALSE], call={if(any(const)) call else NULL},
		summ=summ(R))
    }
#  SCCS  @(#)model.frame.coxph.s	4.4 02/21/99
# Reconstruct the model frame of a coxph fit by re-evaluating its stored
# call as a model.frame() call, honoring data/na.action/subset overrides
# supplied through ...
model.frame.coxph <- function(formula, ...) {
    mcall <- formula$call
    mcall[[1]] <- as.name("model.frame")
    keep <- match(c("", "formula", "data", "weights", "subset",
		    "na.action"), names(mcall), 0)
    mcall <- mcall[keep]
    extras <- list(...)
    override <- extras[match(c("data", "na.action", "subset"),
			     names(extras), 0)]
    mcall[names(override)] <- override
    # coxph has a 'formula' component so this is OK
    fenv <- environment(formula(formula))
    if (is.null(fenv)) fenv <- parent.frame()
    eval(mcall, fenv)
    }
# SCCS @(#)model.frame.survreg.s	1.1 11/25/98
# Reconstruct the model frame of a survreg fit by re-evaluating its stored
# call as a model.frame() call, honoring data/na.action/subset overrides
# supplied through ...
model.frame.survreg <- function(formula, ...) {
    mcall <- formula$call
    mcall[[1]] <- as.name("model.frame")
    keep <- match(c("", "formula", "data", "weights", "subset",
		    "na.action"), names(mcall), 0)
    mcall <- mcall[keep]
    extras <- list(...)
    override <- extras[match(c("data", "na.action", "subset"),
			     names(extras), 0)]
    mcall[names(override)] <- override
    # evaluate where the model terms were created, if that still exists
    fenv <- environment(formula$terms)
    if (is.null(fenv)) fenv <- parent.frame()
    eval(mcall, fenv)
    }
#SCCS 04/14/92 @(#)model.newframe.s	4.3
# This function is called if you want to get a new data frame,
#   usually for prediction.  Its main job is to "glue" any
#   transform specific information back onto the formula, so that
#   data dependent transforms work as they used to.
# It only works if the data dependent functions are not inside another one,
#   so  sqrt(age - min(age)) is out of luck.  It also only works for those
#   transforms that support it by adding data dependent info as an attribute
#   of their output.
# If you know this isn't so, then safe=TRUE uses a method that is much longer,
#   but is guaranteed to work; see predict.gam

# Build a model frame (or model matrix) for 'newdata' from a fitted model's
#   terms, typically for prediction.
#  safe=TRUE re-evaluates the original model-frame call on the old and new
#    data stacked together, so data-dependent transforms are recomputed
#    consistently across both.
#  response=TRUE keeps the response variable in the frame (safe=FALSE only).
model.newframe <- function(object, newdata, safe=FALSE, response=FALSE, ...) {
    if (inherits(object, 'terms'))  Terms <- object
    else {
	Terms <- object$terms
	if (!inherits(Terms, 'terms'))
	    stop ("Invalid terms component of object")
	}
    offset <- attr(Terms, 'offset')

    # First, is newdata just a list of numbers?
    # (a single non-negative whole number is treated as a frame number and
    #  falls through to the model.frame call below)
    if (is.numeric(newdata)) {
	nvar <- length(attr(Terms,"term.labels")) + length(offset)
	if (length(newdata)>1  || newdata!=floor(newdata)  || newdata<0){ #It's not just a frame number
	    if (is.matrix(newdata) && ncol(newdata) == nvar)
		   return(newdata)
	    else if (length(newdata) == nvar)
		   return(matrix(newdata,1,nvar))
	    else stop("Argument \"newdata\" cannot be coerced to an appropriate model matrix")
	    }
	}

    # newdata is a list, data frame, or frame number
    if (!safe) {
	#augment the arguments with extra parameters
	  #someday
	if (!response) Terms <- delete.response(Terms)
	model.frame(Terms, newdata, ...)
	}
    else {
	#Do a safe call, by building up a brand new model frame
	Call <- object$call
	Call[[1]] <- as.name("model.frame")
	# terms.inner: helper defined elsewhere in this package; presumably
	#   strips the formula to its innermost variables -- TODO confirm
	Call$formula <- terms.inner(formula(object))
   #might need to tack on the response here!
	if (response) stop("Not implimented yet for safe=TRUE, response=TRUE")
	Call$na.action <- function(x)  x
	Call <- Call[match(c("", "formula", "data", "subset", "na.action"),
	    names(Call), 0)]
	data <- eval(Call)          # frame built from the original data
	attr(data, "terms") <- NULL
	Call$subset <- NULL
	Call$data <- substitute(newdata)
	newdata <- eval(Call)       # frame built from the new data
	attr(newdata, "terms") <- NULL
	d2 <- dim(newdata)
	if(d2[1] < 1)
	    stop("0 rows in newdata")
	d1 <- dim(data)
	if(d1[2] != d2[2])  #newdata missing some variables
	    data <- data[, names(newdata), drop = FALSE]
	data[seq(d1[1] + 1, d1[1] + d2[1]),  ] <- newdata  #rbind the new on
	attr(data, "row.names") <- c(rep("OLD DATA",d1[1]), row.names(newdata))
	#Now compute the combined model frame, excluding the response
	na.action <- eval(object$call$na.action)
	Terms <- object$terms
	Terms <- delete.response(Terms)
	model.frame(Terms, data, na.action = na.action)
	}
    }
#SCCS @(#)plot.cox.zph.s	4.6 08/13/96
# Plot the scaled Schoenfeld residuals of a cox.zph object against time,
#   with a natural-spline smooth of beta(t) for each requested variable.
#  resid: also plot the residual points
#  se:    add +/- 2 standard error bands around the smooth
#  df:    degrees of freedom for the spline smooth
#  nsmo:  number of points at which the smooth is evaluated
#  var:   variables to plot, by index or by name (default: all)
# Uses ns() from the splines package; the require() below is historical.
plot.cox.zph <- function(x, resid=TRUE, se=TRUE, df=4, nsmo=40, var, ...) {
    ##require(splines)
    xx <- x$x         # (transformed) event times
    yy <- x$y         # matrix of scaled residuals, one column per variable
    d <- nrow(yy)     # number of events
    df <- max(df)     #error proofing
    nvar <- ncol(yy)
    pred.x <- seq(from=min(xx), to=max(xx), length=nsmo)
    # fit one spline basis over prediction grid + data so they share knots
    temp <- c(pred.x, xx)
    lmat <- ns(temp, df=df, intercept=TRUE)
    pmat <- lmat[1:nsmo,]       # for prediction
    xmat <- lmat[-(1:nsmo),]
    qmat <- qr(xmat)

    if (se) {
	# bk = R^{-1} from the QR, so xtx = (X'X)^{-1};
	# seval = d * diag(pmat %*% xtx %*% t(pmat)), the pointwise
	#   variance factor of the fitted smooth
	bk <- backsolve(qmat$qr[1:df, 1:df], diag(df))
	xtx <- bk %*% t(bk)
	seval <- d*((pmat%*% xtx) *pmat) %*% rep(1, df)
	}

    ylab <- paste("Beta(t) for", dimnames(yy)[[2]])
    if (missing(var)) var <- 1:nvar
    else {
	if (is.character(var)) var <- match(var, dimnames(yy)[[2]])
	if  (any(is.na(var)) || max(var)>nvar || min(var) <1)
	    stop("Invalid variable requested")
	}

    #
    # Figure out a 'good' set of x-axis labels.  Find 8 equally spaced
    #    values on the 'transformed' axis.  Then adjust until they correspond
    #    to rounded 'true time' values.  Avoid the edges of the x axis, or
    #    approx() may give a missing value
    if (x$transform == 'log') {
	xx <- exp(xx)
	pred.x <- exp(pred.x)
	}
    else if (x$transform != 'identity') {
	# map 8 interior grid points back to the raw time scale and round
	xtime <- as.numeric(dimnames(yy)[[1]])
	apr1  <- approx(xx, xtime, seq(min(xx), max(xx), length=17)[2*(1:8)])
	temp <- signif(apr1$y,2)
	apr2  <- approx(xtime, xx, temp)
	xaxisval <- apr2$y
	xaxislab <- rep("",8)
	for (i in 1:8) xaxislab[i] <- format(temp[i])
	}

    # one panel per requested variable
    for (i in var) {
	y <- yy[,i]
	yhat <- pmat %*% qr.coef(qmat, y)   # spline smooth of beta(t)
	if (resid) yr <-range(yhat, y)
	else       yr <-range(yhat)
	if (se) {
	    temp <- 2* sqrt(x$var[i,i]*seval)
	    yup <- yhat + temp
	    ylow<- yhat - temp
	    yr <- range(yr, yup, ylow)
	    }

	if (x$transform=='identity')
	    plot(range(xx), yr, type='n', xlab="Time", ylab=ylab[i], ...)
	else if (x$transform=='log')
	    plot(range(xx), yr, type='n', xlab="Time", ylab=ylab[i], log='x',
			...)
	else {
	    # custom axis labels computed above for other transforms
	    plot(range(xx), yr, type='n', xlab="Time", ylab=ylab[i],
                 axes=FALSE,...)
	    axis(1, xaxisval, xaxislab)
	    axis(2)
	    box()
	    }
	if (resid) points(xx, y)
	lines(pred.x, yhat)
	if (se) {
	    lines(pred.x, yup,lty=2)
	    lines(pred.x, ylow, lty=2)
	    }
	}
    }
#SCCS @(#)plot.survfit.s	4.19 07/09/00
# Plot the curves of a survfit object.
#  conf.int:  add confidence bands (default: only when there is a single
#             curve)
#  mark.time: TRUE = tick marks at censoring times, or a numeric vector of
#             times at which to place marks
#  xscale, yscale: axis scale factors; log: logical, or 'x'/'y'/'xy'
#  firstx, firsty: starting point of each curve; xmax/ymin: plot limits
#  fun:       optional transform ('event', 'cumhaz', 'cloglog', 'pct', ...)
#  xaxs='S':  special survival axis style (4% padding on the right only)
#  legend.*:  optional legend; legend.pos 0 = lower left, 1 = upper right,
#             or an explicit c(x, y)
# Returns, invisibly, the endpoint of each curve (useful for labels).
plot.survfit<- function(x, conf.int,  mark.time=TRUE,
			mark=3,col=1,lty=1, lwd=1, cex=1, log=FALSE,
			xscale=1, yscale=1, 
			firstx=0, firsty=1,
			xmax, ymin=0,
			fun,
			xlab="", ylab="", xaxs='S', bty=NULL,legend.text=NULL,
                        legend.pos=0,legend.bty="n",main=NULL,...) {

    mintime <- min(x$time)
    firstx <- min(firstx,mintime)
 
    # curves from a delayed-entry fit start at the entry time
    if (!is.null(x$new.start))
	    firstx <- x$new.start
    
    firstx <- firstx/xscale

    # decode the 'log' argument into per-axis flags and a par-style string
    if (is.logical(log)) {
	logy <- log
	logx <- FALSE
	if (logy) logax <- 'y'
	else      logax <- ""
        }
    else {
	logy <- (log=='y' || log=='xy')
	logx <- (log=='x' || log=='xy')
	logax  <- log
        }

    if (!inherits(x, 'survfit'))
	    stop("First arg must be the result of survfit")

    # default: confidence bands only when there is exactly one curve
    if (missing(conf.int)) {
	if (is.null(x$strata) && !is.matrix(x$surv)) conf.int <-TRUE
	else conf.int <- FALSE
        }

    # stemp maps each time point to its stratum
    if (is.null(x$strata)) {
	nstrat <- 1
	stemp <- rep(1, length(x$time))
        }
    else {
	nstrat <- length(x$strata)
	stemp <- rep(1:nstrat,x$ntimes.strata)
	##stemp <- rep(1:nstrat,x$strata)
        }

    ssurv <- x$surv
    stime <- x$time
    supper <- x$upper
    slower <- x$lower
    if (!missing(xmax) && any(x$time>xmax)) {
	# prune back the survival curves
	# I need to replace x's over the limit with xmax, and y's over the
	#  limit with either the prior y value or firsty
	keepx <- keepy <- NULL  # lines to keep
	yzero <- NULL           # if all points on a curve are < xmax
	tempn <- table(stemp)
	offset <- cumsum(c(0, tempn))
	for (i in 1:nstrat) {
	    ttime <-stime[stemp==i]
	    if (all(ttime <= xmax)) {
		keepx <- c(keepx, 1:tempn[i] + offset[i])
		keepy <- c(keepy, 1:tempn[i] + offset[i])
	        }
	    else {
		bad <- min((1:tempn[i])[ttime>xmax])
		if (bad==1)  {
		    keepy <- c(keepy, 1+offset[i])
		    yzero <- c(yzero, 1+offset[i])
		    } 
		else  keepy<- c(keepy, c(1:(bad-1), bad-1) + offset[i])
		keepx <- c(keepx, (1:bad)+offset[i])
		stime[bad+offset[i]] <- xmax
		x$n.event[bad+offset[i]] <- 1   #don't plot a tick mark
	        }
	    }	

	# ok, now actually prune it
	stime <- stime[keepx]
	stemp <- stemp[keepx]
	x$n.event <- x$n.event[keepx]
	if (is.matrix(ssurv)) {
	    if (length(yzero))
		    ssurv[yzero,] <- firsty
	    ssurv <- ssurv[keepy,,drop=FALSE]
	    if (!is.null(supper)) {
		if (length(yzero)) supper[yzero,] <- slower[yzero,] <- firsty
		supper <- supper[keepy,,drop=FALSE]
		slower <- slower[keepy,,drop=FALSE]
	        }
	    }
	else {
	    if (length(yzero)) ssurv[yzero] <- firsty
	    ssurv <- ssurv[keepy]
	    if (!is.null(supper)) {
		if (length(yzero)) supper[yzero] <- slower[yzero] <- firsty
		supper <- supper[keepy]
		slower <- slower[keepy]
  	        }
	    }
        }
    stime <- stime/xscale
    	
    # optional transform of curves, CI limits, firsty, and ymin
    if (!missing(fun)) {
	if (is.character(fun)) {
	    tfun <- switch(fun,
			   'log' = function(x) x,
			   'event'=function(x) 1-x,
			   'cumhaz'=function(x) -log(x),
			   'cloglog'=function(x) log(-log(x)),
			   'pct' = function(x) x*100,
			   'logpct'= function(x) 100*x,
			   stop("Unrecognized function argument")
			   )
	    # 'log' and 'logpct' are drawn on a log y-axis instead of
	    #   transforming the data
	    if (fun=='log'|| fun=='logpct') logy <- TRUE

	    if (fun=='cloglog') {
		logx <- TRUE
		if (logy) logax <- 'xy'
		else logax <- 'x'
	        }
	    }
	else if (is.function(fun)) tfun <- fun
        else stop("Invalid 'fun' argument")
	
	ssurv <- tfun(ssurv )
	if (!is.null(supper)) {
	    supper <- tfun(supper)
	    slower <- tfun(slower)
	    }
	firsty <- tfun(firsty)
	ymin <- tfun(ymin)
        }

    if (is.null(x$n.event)) mark.time <- FALSE   #expected survival curve

    # set default values for missing parameters
    if (is.matrix(ssurv)) ncurve <- nstrat * ncol(ssurv)
    else 		  ncurve <- nstrat

    mark <- rep(mark, length=ncurve)
    col  <- rep(col, length=ncurve)
    lty  <- rep(lty, length=ncurve)
    lwd  <- rep(lwd, length=ncurve)

    if (is.numeric(mark.time)) mark.time <- sort(mark.time)

    # Do axis range computations
    if (xaxs=='S') {
	#special x- axis style for survival curves
	xaxs <- 'i'  #what S thinks
	tempx <- max(stime) * 1.04
        }
    else tempx <- max(stime)
    tempx <- c(firstx, tempx, firstx)

    if (logy) {
	# a log y-axis cannot show 0; nudge exact zeros just below the
	#   smallest positive value
	tempy <-  range(ssurv[is.finite(ssurv)& ssurv>0])
	if (tempy[2]==1) tempy[2] <- .99
	if (any(ssurv==0)) {
	    tempy[1] <- tempy[1]*.8
	    ssurv[ssurv==0] <- tempy[1]
	    if (!is.null(supper)) {
		supper[supper==0] <- tempy[1]
		slower[slower==0] <- tempy[1]
	        }
	    }
	tempy <- c(tempy, firsty)
        }
    else tempy <- c(range(ssurv[is.finite(ssurv)] ), firsty)
    
    if (missing(fun)) {
	tempx <- c(tempx, firstx)
	tempy <- c(tempy, ymin)
        }
    #
    # Draw the basic box
    #
    plot(tempx, tempy*yscale, type='n', log=logax,
	                  xlab=xlab, ylab=ylab, xaxs=xaxs,main=main, bty=bty,...)
    # rescale user coordinates so the curves can be drawn unscaled
    if(yscale != 1) {
	if (logy) par(usr =par("usr") -c(0, 0, log10(yscale), log10(yscale))) 
	else par(usr =par("usr")/c(1, 1, yscale, yscale))   
        }

    #
    # put up the curves one by one
    #   survfit has already put them into the "right" order
    dostep <- function(x,y) {
	# Convert (x, y) into step-function coordinates, dropping redundant
	#   points in long horizontal runs (slow to draw, smear dashes)
	if (is.na(x[1] + y[1])) {
	    x <- x[-1]
	    y <- y[-1]
	    }
	n <- length(x)
	if (n > 2) {
	    # replace verbose horizontal sequences like
	    # (1, .2), (1.4, .2), (1.8, .2), (2.3, .2), (2.9, .2), (3, .1)
            # with (1, .2), (3, .1).  They are slow, and can smear the looks
	    # of the line type.
	    dupy <- c(TRUE, diff(y[-n]) !=0, TRUE)
	    n2 <- sum(dupy)

	    #create a step function
	    xrep <- rep(x[dupy], c(1, rep(2, n2-1)))
	    yrep <- rep(y[dupy], c(rep(2, n2-1), 1))
	    list(x=xrep, y=yrep)
            }
	else if (n==1) list(x=x, y=y)
	else           list(x=x[c(1,2,2)], y=y[c(1,1,2)])
        }

    i <- 0               # curve counter: indexes mark/col/lty/lwd
    xend <- NULL
    yend <- NULL

    for (j in unique(stemp)) {
	who <- (stemp==j)
	xx <- c(firstx, stime[who])
	nn <- length(xx)
	# NOTE(review): deaths/zero.one are set only when x$type is
	#   'counting' or 'right'; any other type would leave them
	#   undefined below -- confirm survfit always yields one of these
	if (x$type == 'counting') {
	    deaths <- c(-1, x$exit.censored[who])
	    zero.one <- 1
	    }
	else if (x$type == 'right') {
	    deaths <- c(-1, x$n.event[who])
	    zero.one <- 0
	    }
	if (is.matrix(ssurv)) {
	    # several curves per stratum: one per matrix column
	    for (k in 1:ncol(ssurv)) {
		i <- i+1
		yy <- c(firsty, ssurv[who,k])
		lines(dostep(xx, yy), lty=lty[i], col=col[i], lwd=lwd[i]) 

		if (is.numeric(mark.time)) {
		    indx <- mark.time
		    for (k in seq(along=mark.time))
			indx[k] <- sum(mark.time[k] > xx)
		    points(mark.time[indx<nn], yy[indx[indx<nn]],
			   pch=mark[i],col=col[i],cex=cex)
		    }
		else if (mark.time==TRUE && any(deaths==zero.one)) {
		    points(xx[deaths==zero.one], 
			   yy[deaths==zero.one],
			   pch=mark[i],col=col[i],cex=cex)
		    }

		xend <- c(xend,max(xx))
		yend <- c(yend,min(yy))

		if (conf.int && !is.null(supper)) {
		    # a single curve gets its CI in the next line type up
		    if (ncurve==1) lty[i] <- lty[i] +1
		    yy <- c(firsty, supper[who,k])
		    lines(dostep(xx,yy), lty=lty[i], col=col[i], lwd=lwd[i])
		    yy <- c(firsty, slower[who,k])
		    lines(dostep(xx,yy), lty=lty[i], col=col[i], lwd=lwd[i])
		    }
	        }
	    }
	else {
	    i <- i+1
	    yy <- c(firsty, ssurv[who])
	    lines(dostep(xx, yy), lty=lty[i], col=col[i], lwd=lwd[i])

	    if (is.numeric(mark.time)) {
		indx <- mark.time
		for (k in seq(along=mark.time))
		    indx[k] <- sum(mark.time[k] > xx)
		points(mark.time[indx<nn], yy[indx[indx<nn]],
		       pch=mark[i],col=col[i],cex=cex)
	        }
	    else if (mark.time && any(deaths==zero.one)) {
		points(xx[deaths==zero.one], 
		       yy[deaths==zero.one],
		       pch=mark[i],col=col[i],cex=cex)
	        }

	    xend <- c(xend,max(xx))
	    yend <- c(yend,min(yy))

	    if (conf.int && !is.null(supper)) {
		if (ncurve==1) lty[i] <- lty[i] +1
		yy <- c(firsty, supper[who])
		lines(dostep(xx,yy), lty=lty[i], col=col[i], lwd=lwd[i])
		yy <- c(firsty, slower[who])
		lines(dostep(xx,yy), lty=lty[i], col=col[i], lwd=lwd[i])
	        }
	    }
        }

    ##legend
    # NOTE(review): llx/lly are only set for legend.pos of length 2 or the
    #   scalar values 0/1; any other scalar would fail at legend() -- confirm
    if (!is.null(legend.text)){
        xc <- par("cex") * xinch(par("cin")[1], warn.log = FALSE)
        if (is.list(legend.pos)) legend.pos<-unlist(legend.pos)
        if (length(legend.pos)==2){
            llx<-legend.pos[1]
            lly<-legend.pos[2]
        }
        else if (legend.pos==1){
            lly<-yscale*0.95
            llx<-max(tempx)-max(strwidth(legend.text))-6*xc
        }
        else if (legend.pos==0){
            llx<-0.95*min(tempx)+0.05*max(tempx)
            lly<-ymin+(2+length(legend.text))*max(strheight(legend.text))
        }
        legend(llx,lly,legend=legend.text,lty=lty,bty=legend.bty,col=col,...)
    }
    invisible(list(x=xend, y=yend))
    }







#SCCS 04/14/92 @(#)points.survfit.s	4.2
# Add the points of a survfit object's curve(s) to an existing plot.
# A matrix of survival values (several curves) goes through matpoints();
# a single curve goes through points().
points.survfit <- function(x, ...) {
    if (is.matrix(x$surv)) matpoints(x$time, x$surv, ...)
    else points(x$time, x$surv, ...)
    }
# SCCS @(#)predict.coxph.penal.s	5.3 11/25/98
# Predictions from a penalized Cox model.  When the model contains a
#   sparse frailty term (pterms==2) that term cannot go through the usual
#   X-matrix machinery; it is removed from the terms/X matrix, the parent
#   predict method is called, and for type='terms' the sparse column is
#   spliced back into the result.
#  type, se.fit, terms, collapse: as in predict.coxph
predict.coxph.penal <- function(object,  newdata, 
				type=c("lp", "risk", "expected", "terms"),
				se.fit=FALSE, terms=names(object$assign), 
				collapse, safe=FALSE, ...) {
 
    type <- match.arg(type)
    n <- object$n
    Terms <- object$terms
    pterms <- object$pterms
    # If there are no sparse terms
    if (!any(pterms==2) || type=='expected' || 
	(missing(newdata) && se.fit==FALSE && type!='terms')) NextMethod('predict',object,...)
    else {
	# treat the sparse term as an offset term
	#  It gets picked up in the linear predictor, so all I need to
	#  do is "X" it out of the model so that it doesn't get picked up
	#  as a part of the X matrix and etc.
	# I know that the sparse term is a single column BTW
	#
	termname <- names(object$pterms)
	sparsename <- termname[object$pterms==2]
	nvar <- length(termname)
	na.action <- object$na.action
	na.action <- object$na.action
	object$na.action <- NULL

	if (missing(newdata) && (se.fit || type=='terms')) {
	    # I need the X matrix
	    x <- object$x
	    if (is.null(x)) {
		temp <- coxph.getdata(object, y=TRUE, x=TRUE, strata=TRUE)
		if (is.null(object$y)) object$y <- temp$y
		if (is.null(object$strata)) object$strata <- temp$strata
		x <- temp$x
		}
	    # indx: per-observation level of the sparse (frailty) term
	    xvar <- match(sparsename, dimnames(x)[[2]])
	    indx <- as.numeric(as.factor(x[,xvar]))
	    object$x <- x[, -xvar, drop=FALSE]
	    }
	
	if (nvar==1) {
	    # Only the sparse term!
	    # NOTE(review): pred <- rep(0,n) for newdata is immediately
	    #   overwritten by the linear predictor of the *original* data
	    #   below -- looks suspect; confirm intended behavior
	    if (!missing(newdata)) {
		n <- nrow(as.data.frame(newdata))
		pred <- rep(0,n)
		}
	    se <- sqrt(object$fvar[indx])
	    pred <- object$linear.predictor
	    if (type=='risk') pred <- exp(pred)
	    }
	else {
	    # drop the sparse term from the terms object and the requested
	    #   'terms' list before delegating to the parent method
	    temp <- attr(object$terms, 'term.labels')
	    object$terms <- object$terms[-match(sparsename, temp)]
            temp<-match(sparsename,terms)
            oldTerms<-terms
            if (!is.na(temp)) terms<-terms[-temp]
	    pred <- NextMethod('predict',object,terms=terms,...)
            terms<-oldTerms
	    if (se.fit) {
		se <- pred$se.fit
		pred <- pred$fit
		}

	    if (type=='terms' && missing(newdata)) {
		# In this case (only) I add the sparse term back in
		spterm <- object$frail[indx]
		spstd  <- sqrt(object$fvar[indx])
		if (nvar==2) {
		    # splice the sparse column before or after the other one
		    if (xvar==2) {
			pred <- cbind(pred, spterm)
			if (se.fit) se <- cbind(se, spstd)
			}
		    else {
			pred <- cbind(spterm, pred)
			if (se.fit) se <- cbind(spstd, se)
			}
		    }
		else {
		    # general case: columns before / after the sparse one
		    first <- if (xvar==1) 0 else 1:(xvar-1)
		    secnd <- if (xvar==nvar) 0 else  (xvar+1):nvar
		    pred  <- cbind(pred[,first], spterm, pred[,secnd])
		    if (se.fit)
			    se <- cbind(se[,first], spstd, se[,secnd])
		    }
		dimnames(pred) <- list(dimnames(x)[[1]], termname)
		if (se.fit) dimnames(se) <- dimnames(pred)
		}
	    }

	#Expand out the missing values in the result
	# But only if operating on the original dataset
	if (missing(newdata) && !is.null(na.action)) {
	    pred <- naresid(na.action, pred)
	    if (is.matrix(pred)) n <- nrow(pred)
	    else                 n <- length(pred)
	    if(se.fit) se <- naresid(na.action, se)
	    }

	# Collapse over subjects, if requested
	if (!missing(collapse)) {
	    if (length(collapse) != n) 
		    stop("Collapse vector is the wrong length")
	    pred <- rowsum(pred, collapse)
	    if (se.fit) se <- sqrt(rowsum(se^2, collapse))
	    }

	if (se.fit) list(fit=pred, se.fit=se)
	else pred
	}
    }
		


#SCCS 02/15/99 @(#)predict.coxph.s	4.11
#What do I need to do predictions --
#
#linear predictor:  exists
#        +se     :  X matrix
#        +newdata:  means of old X matrix, new X matrix, new offset
#
#risk -- same as lp
#
#expected --    cumulative hazard for subject= baseline haz + time + risk
#        +se :  sqrt(expected)
#      +new  :  baseline hazard function, new time, new x, means of old X,
#                        new offset, new strata
#
#terms -- : X matrix and the means
#    +se  :  ""  + I matrix
#   +new  : new X matrix and the old means + I matrix
predict.coxph <-
function(object, newdata, type=c("lp", "risk", "expected", "terms"),
		se.fit=FALSE,
		terms=names(object$assign), collapse, safe=FALSE, ...)

    {
    # Predict method for Cox proportional hazards fits.
    #
    # Arguments:
    #   object   a coxph fit
    #   newdata  optional data frame of new observations; when missing,
    #            predictions are for the original data
    #   type     "lp" = linear predictor, "risk" = exp(lp),
    #            "expected" = expected number of events,
    #            "terms" = per-term contributions to the linear predictor
    #   se.fit   if TRUE, also return standard errors
    #   terms    which terms to report when type="terms"
    #   collapse optional subject identifier: predictions are summed within
    #            each level, se's combined as sqrt of the summed variances
    #   safe     accepted for call compatibility; not used here
    #
    # Value: a vector/matrix of predictions, or list(fit, se.fit).
    type <-match.arg(type)
    n <- object$n
    Terms <- object$terms
    # Strata and cluster terms carry no coefficients; drop them from the
    # terms structure used to rebuild the model matrix.
    strata <- attr(Terms, 'specials')$strata
    dropx <- NULL
    if (length(strata)) {
	   temp <- untangle.specials(Terms, 'strata', 1)
	   dropx <- temp$terms
	   }
    if (length(attr(Terms, 'specials')$cluster)) {
	temp <- untangle.specials(Terms, 'cluster', 1)
	dropx <- c(dropx, temp$terms)
	}
    if (length(dropx)) Terms2 <- Terms[-dropx]
    else  Terms2 <- Terms

    offset <- attr(Terms, "offset")

    if (missing(newdata)) {
	# Only some prediction types need the model matrix; rebuild it from
	# the model frame when it was not kept on the fit.
	if (type=='terms' || (se.fit && (type=='lp' || type=='risk'))) {
	    x <- object$x
	    if (is.null(x)) {
		x <- model.matrix(Terms2, model.frame(object))[,-1,drop=FALSE]
		}
	    # center at the means used during fitting
	    x <- sweep(x, 2, object$means)
	    }
	else if (type=='expected') {
	    y <- object$y
	    if (is.null(y)) {
		m <- model.frame(object)
		y <- model.extract(m, 'response')
		}
	    }
	}
    else {
	# New data: build the model frame/matrix for the new observations
	if (type=='expected'){
	     m <- model.newframe(Terms, newdata, response=TRUE)
             x <- model.matrix(Terms2, m)[,-1,drop=FALSE]
         }
	else {
            m <- model.newframe(Terms2, newdata)
            x <- model.matrix(delete.response(Terms2), m)[,-1,drop=FALSE]
        }

	x <- sweep(x, 2, object$means)
	if (length(offset)) {
	    if (type=='expected') offset <- as.numeric(m[[offset]])
	    else {
		offset <- attr(Terms2, 'offset')
		offset <- as.numeric(m[[offset]])
		}
	    }
	else offset <- 0
	}

    #
    # Now, lay out the code one case at a time.
    #  There is some repetition this way, but otherwise the code just gets
    #    too complicated.
    # NA coefficients (singularities) are treated as zero contributions.
    if (is.null(object$coef))
        coef<-numeric(0)
    else
        coef <- ifelse(is.na(object$coef), 0, object$coef)
    if (type=='lp' || type=='risk') {
	if (missing(newdata)) {
	    pred <- object$linear.predictors
	    names(pred) <- names(object$residuals)
	    }
	else                  pred <- x %*% coef  + offset
	if (se.fit) se <- sqrt(diag(x %*% object$var %*% t(x)))

	if (type=='risk') {
	    pred <- exp(pred)
	    if (se.fit) se <- se * sqrt(pred)
	    }
	}

    else if (type=='expected') {
	# expected events = status - martingale residual
	if (missing(newdata)) pred <- y[,ncol(y)] - object$residual
	else  stop("Method not yet finished")
	se   <- sqrt(pred)
	}

    else {  #terms is different for R <TSL>
        asgn <- object$assign
        nterms<-length(terms)
        pred<-matrix(ncol=nterms,nrow=NROW(x))
        if (is.character(terms))
          termnames<-terms
        else
          termnames<-names(object$assign)[terms]
        dimnames(pred)<-list(rownames(x),termnames)
        if (se.fit){
            se<-matrix(ncol=nterms,nrow=NROW(x))
            dimnames(se)<-list(rownames(x),termnames)
            R<-object$var
            # (an unused scratch vector created with the defunct real()
            #  was removed here)
        }
        # One column per requested term: x_i' beta_i, with a per-row
        # quadratic form against the variance for the se.
        for (i in 1:nterms){
            ii<-asgn[[terms[i] ]]
            pred[,i]<-x[,ii,drop=FALSE]%*%(coef[ii])
            if (se.fit){
                for(j in (1:NROW(x))){
                    xi<-x[j,ii,drop=FALSE]
                    vci<-R[ii,ii]
                    se[j,i]<-sqrt(sum(xi%*% vci %*%t( xi)))
                }
            }
        }
    }

    ##if (se.fit) se <- drop(se)
    ##pred <- drop(pred)
    ##Expand out the missing values in the result
    # But only if operating on the original dataset
    if (missing(newdata) && !is.null(object$na.action)) {
	pred <- naresid(object$na.action, pred)
        n<-NROW(pred)
	if(se.fit) se <- naresid(object$na.action, se)
	}

    # Collapse over subjects, if requested
    if (!missing(collapse)) {
	if (length(collapse) != n) stop("Collapse vector is the wrong length")
	pred <- rowsum(pred, collapse)
	if (se.fit) se <- sqrt(rowsum(se^2, collapse))
	}

    if (se.fit) list(fit=pred, se.fit=se)
    else pred
    }
# SCCS @(#)predict.survreg.penal.s	1.1 11/30/98
#
# This routine just stops disastrous arithmetic for models with sparse
# terms.  A placeholder until the proper sparse terms actions are inserted.
#
predict.survreg.penal <- function(object, ...) {
    # Sparse (frailty-style) penalized terms are coded as 2 in pterms;
    # prediction is not implemented for them, so refuse up front.
    sparse <- (object$pterms == 2)
    if (any(sparse)) stop("Predictions not available for sparse models")
    # Otherwise hand off to the next predict method in the class chain.
    NextMethod('predict')
    }
# SCCS @(#)predict.survreg.s	4.11 02/06/99
# Predict method for parametric survival (survreg) fits.
#   type: "response" (inverse-transformed lp), "lp"/"link"/"linear"
#         (linear predictor), "terms" (per-term contributions),
#         "quantile" (quantiles of the fitted distribution on the response
#         scale), "uquantile" (quantiles on the linear-predictor scale).
#   p:    probabilities for the quantile types.
# Value: predictions, or list(fit, se.fit) when se.fit=TRUE.
predict.survreg <-
    function(object, newdata, type=c('response', "link", 'lp', 'linear',
				     'terms', 'quantile','uquantile'),
				se.fit=FALSE,  terms=NULL,
	                        p=c(.1, .9),...)
    {
#
# What do I need to do predictions ?
#   
#  linear predictor: exists
#           +se    : X matrix
#          newdata : new X matrix
#
#  response -- same as lp, +transform, from distribution
#  
#  p --  density function from distribution
#          scale(s) -- if multiple I need the strata
#          +se : variance matrix
#	   newdata: new X
#
    ripley<-FALSE ##obsolete undocumented option
    type <-match.arg(type)
    if (type=='link') type<- 'lp'  #true until there are link functions
    if (type=='linear') type<- 'lp'
    n <- length(object$linear.predictors)
    Terms <- object$terms
    if(!inherits(Terms, "terms"))
	    stop("invalid terms component of  object")

    strata <- attr(Terms, 'specials')$strata
    Terms <- delete.response(Terms)
    coef <- object$coefficients
    intercept <- attr(Terms, "intercept")
    nvar <- length(object$coef)
    vv <- object$var[1:nvar, 1:nvar]
    # scale was fixed iff the variance matrix has no extra log(scale) rows
    fixedscale <- (nvar == ncol(object$var)) || ripley

    # the model matrix is only needed for terms output or standard errors
    if (missing(newdata) && (type=='terms' || se.fit)) need.x <- TRUE
    else  need.x <- FALSE

    if (length(strata) && (type=='quantile' || type=='uquantile') &&
	      !fixedscale) {
	#
	# We need to reconstruct the "strata" variable
	#
	if (is.null(object$model)) m <- model.frame(object)
	else m <- object$model
	temp <- untangle.specials(Terms, 'strata', 1)
	dropx <- temp$terms
	if (length(temp$vars)==1) strata.keep <- m[[temp$vars]]
	else strata.keep <- strata(m[,temp$vars], shortlabel=TRUE)
	strata <- as.numeric(strata.keep)
	nstrata <- max(strata)
	    
	if (missing(newdata) && need.x){
	    x <- object$x
	    if (is.null(x)) x <- model.matrix(Terms[-dropx], m)
	    }

	else if (!missing(newdata)) {
	    # map the new observations onto the fitted strata levels
	    newframe <- model.frame(Terms, newdata, na.action=function(x)x)
	    if (length(temp$vars)==1) newstrat <- newframe[[temp$vars]]
	    else newstrat <- strata(newframe[,temp$vars], shortlabel=TRUE)
	    strata <- match(newstrat, levels(strata.keep))
	    x <- model.matrix(Terms[-dropx], newframe)
	    offset <- model.extract(newframe, 'offset')
	    }
	}

    else {  # per subject strata not needed
	temp <- untangle.specials(Terms, 'strata', 1)
	if (length(temp$terms)) Terms <- Terms[-temp$terms]
	strata <- rep(1,n); nstrata<- 1
	if (missing(newdata) && need.x) {
	    x <- object$x
	    if (is.null(x)) {
		if (is.null(object$model)) 
			x <- model.matrix(Terms, model.frame(object))
		else    x <- model.matrix(Terms, object$model)
		}
	    }

	else if (!missing(newdata)) {
	    x <- model.matrix(Terms, newdata)
	    offset <- 0
	    strata <- rep(1, nrow(x))
	    }
	}
    scale <- object$scale[strata]
    #center x if terms are to be computed
    # NOTE(review): type can never equal 'p' here -- match.arg above only
    # yields response/lp/terms/quantile/uquantile -- so centering in fact
    # happens only for terms-with-intercept.  Left unchanged to preserve
    # behavior.
    if(type=='p' || (type == "terms" && intercept)) 
	    x <- sweep(x, 2, object$means)

    #
    # Grab the distribution
    #
    if (is.character(object$dist)) dd <- survreg.distributions[[object$dist]]
    else dd <- object$dist
    if (is.null(dd$itrans)) {
	# identity transform and unit derivative when none is supplied
	itrans <- function(x) x
        dtrans <- function (x) 1 ## bug fix from TMT, 2002-17-6
	}
    else {
	itrans <- dd$itrans
	dtrans <- dd$dtrans
	}
    if (!is.null(dd$dist)) dd <- survreg.distributions[[dd$dist]]

    #
    # Now, lay out the code one case at a time.
    #  There is some repetition this way, but otherwise the code just gets
    #    too complicated.
    #
    if (type=='lp' || type=='response') {
	if (missing(newdata)) {
 	    pred <- object$linear.predictors
#	    names(pred) <- names(object$residuals)
	    }
	else  pred <- x %*% coef  + offset
	if (se.fit) se <- sqrt(diag(x %*% vv %*% t(x)))

	if (type=='response') {
	    # back-transform; delta-method adjustment for the se
	    pred <- itrans(pred)
	    if (se.fit) se <- se/ dtrans(pred)
	    }
	}
    else if (type=='quantile' || type=='uquantile') {
	if (missing(newdata)) pred <- object$linear.predictors
	else  pred <- x %*% coef 
	# "pred" is the mean of the distribution,
	#   now add quantiles and then invert
	qq <- dd$quantile(p, object$parm)
	if (length(qq)==1 || length(pred)==1) {
	    pred <- pred + qq*scale
	    if (se.fit && fixedscale) {
		var <- ((x %*% vv) * x) %*% rep(1., ncol(x))
		se <- rep(sqrt(drop(var)), length(qq))
		}
	    else if (se.fit) {
		# estimated scale: augment x with strata indicators so the
		# scale's variance contributes to the se
		x.strata <- outer(strata, 1:nstrata, 
				  function(x,y) 1*(x==y))
		se <- matrix(0, ncol=length(qq), nrow=nrow(x))
		for (i in 1:(length(qq))) {
		    temp <- cbind(x, (qq[i]*scale)* x.strata)
		    var <- ((temp %*% object$var) *temp) %*% rep(1, ncol(temp))
		    se[,i] <- sqrt(drop(var))
		    }
		se <- drop(se)
		}
	    }
	else {
	    # several quantiles for several subjects: one column per p
	    pred <- c(pred) + outer(scale, qq)
	    if (se.fit && fixedscale) {
		var <- ((x %*% vv) * x) %*% rep(1., ncol(x))
		if (length(qq) >1) {
		    se <- rep(sqrt(drop(var)), length(qq))
		    se <- matrix(se, ncol=length(qq))
		    }
		else se <- sqrt(drop(var))
		}
	    else if (se.fit) {
		x.strata <- outer(strata, 1:nstrata, 
				  function(x,y) 1*(x==y))
		se <- pred
		nc <- rep(1., ncol(object$var))
		for (i in 1:length(qq)) {
		    temp <- cbind(x, (qq[i]*scale)*x.strata)
		    var <- ((temp %*% object$var)* temp) %*% nc
		    se[,i] <- sqrt(drop(var))
		    }
		se <- drop(se)
		}
	    }
	pred <- drop(pred)
	if (type == 'quantile') {
	    pred <- itrans(pred)
	    if (se.fit) se <- se/dtrans(pred)
	    }
	}

    else {  #terms
	asgn <- attrassign(x,Terms)
        hasintercept<-attr(Terms,"intercept")>0
        if (hasintercept)
          asgn$"(Intercept)"<-NULL
        nterms<-length(asgn)
        pred<-matrix(ncol=nterms,nrow=NROW(x))
        dimnames(pred)<-list(rownames(x),names(asgn))
        if (se.fit){
          se<-matrix(ncol=nterms,nrow=NROW(x))
          dimnames(se)<-list(rownames(x),names(asgn))
          R<-object$var
          # (an unused scratch vector created with the defunct real()
          #  was removed here)
        }
        # per-term contributions, with row-by-row quadratic forms for se
        for (i in 1:nterms){
          ii<-asgn[[i]]
          pred[,i]<-x[,ii,drop=FALSE]%*%(coef[ii])
          if (se.fit){
            for(j in (1:NROW(x))){
              xi<-x[j,ii,drop=FALSE]*(coef[ii])
              vci<-R[ii,ii]
              se[j,i]<-sqrt(sum(xi%*% vci %*%t( xi)))
            }
          }
        }
        if (!is.null(terms)){
          pred<-pred[,terms,drop=FALSE]
          if (se.fit)
            se<-se[,terms,drop=FALSE]
        }
      }
    if (type=='terms') attr(pred, 'constant') <- if (hasintercept) coef(object)["(Intercept)"] else 0
    #Expand out the missing values in the result
    # But only if operating on the original dataset
    if (missing(newdata) && !is.null(object$na.action)) {
	pred <- naresid(object$na.action, pred)
	if(se.fit) se <- naresid(object$na.action, se)

	
	}
    if (se.fit) list(fit=pred, se.fit=se)
    else pred
    }
# SCCS @(#)print.cox.zph.s	4.5 09/27/96
print.cox.zph <- function(x, digits = max(options()$digits - 4, 3), ...) {
    # Show the table of proportional-hazards tests; the table is returned
    # invisibly so interactive use does not echo it a second time.
    tab <- x$table
    print(tab, digits = digits)
    invisible(tab)
}
#SCCS 12/29/97 @(#)print.coxph.null.s	4.7
print.coxph.null <- function(x, digits = max(options()$digits - 4, 3), ...) {
    # Echo the call, when one was stored on the fit.
    the_call <- x$call
    if (!is.null(the_call)) {
        cat("Call:  ")
        dput(the_call)
        cat("\n")
    }

    # A null model reports only its log-likelihood ...
    cat("Null model\n  log likelihood=", format(x$loglik), "\n")
    # ... and the sample size, noting any deleted missing values.
    omit <- x$na.action
    if (length(omit)) {
        cat("  n=", x$n, " (", naprint(omit), ")\n", sep = "")
    } else {
        cat("  n=", x$n, "\n")
    }
}
# SCCS @(#)print.coxph.penal.s	1.1 10/28/98
# Print method for penalized Cox models (frailty, pspline, ridge, ...).
# The coefficient table is built one term at a time, delegating to a
# term-specific print function whenever the fitting routine supplied one.
print.coxph.penal <-  function(x, terms=FALSE, maxlabel=25,
			       digits=max(options()$digits - 4, 3), ...) {
    if (!inherits(x, 'coxph.penal')) stop("Invalid object")

    if (!is.null(x$call)) {
	cat("Call:\n")
	dput(x$call)
	cat("\n")
	}
    if (!is.null(x$fail)) {
	cat(" Coxph failed.", x$fail, "\n")
	return()
	}
    # Reduce print precision for the duration of this call only.
    savedig <- options(digits = digits)
    on.exit(options(savedig))

    coef <- x$coef
    if (length(coef)==0 && length(x$frail)==0)
	    stop("Penalized print function can't be used for a null model")

    #
    # Map terms to special print functions, and the list of iteration histories
    #
    # pterms codes each term: 0 = ordinary, >0 = penalized (2 = sparse).
    # print.map[i] holds the index of term i's print function, 0 if none.
    pterms <- x$pterms
    nterms <- length(pterms)
    npenal <- sum(pterms>0)
    print.map <- rep(0,nterms)
    if (!is.null(x$printfun)) {
	temp <- unlist(lapply(x$printfun, is.null))  #which ones are missing
	print.map[pterms>0] <- (1:npenal) * (!temp)
	}

    # Tedious, but build up the coef matrix a term at a time
    print1 <- NULL
    pname1 <- NULL
    # assign2 (when present) maps terms to coefficient positions for
    # penalized fits; otherwise use the ordinary assign list without its
    # intercept slot.
    if (is.null(x$assign2)) alist <- x$assign[-1]
    else alist <- x$assign2

    print2 <- NULL
    for (i in 1:nterms) {
	kk <- alist[[i]]
	if (print.map[i] >0) {
	    # Term has its own print function.  For a sparse term the
	    # "coefficients" are the frailties; the empty third argument
	    # (no var2) is deliberate.
	    j <- print.map[i]	
	    if (pterms[i]==2) 
		 temp <- (x$printfun[[j]])(x$frail, x$fvar, ,x$df[i], 
					   x$history[[j]])
	    else temp <- (x$printfun[[j]])(coef[kk], x$var[kk,kk], 
					   x$var2[kk,kk], 
					   x$df[i], x$history[[j]])
	    print1 <- rbind(print1, temp$coef)
	    if (is.matrix(temp$coef)) {
		xx <- dimnames(temp$coef)[[1]]
		if (is.null(xx))
			xx <- rep(names(pterms)[i], nrow(temp$coef))
		else    xx <- paste(names(pterms)[i], xx, sep=', ')
		pname1 <- c(pname1, xx)
		}
	    else  pname1 <- c(pname1, names(pterms)[i])
	    print2 <- c(print2, temp$history)
	    }

	else if (terms && length(kk)>1) {
	    # terms=TRUE: collapse a multi-coefficient term into a single
	    # Wald chi-square line.
	    pname1 <- c(pname1, names(pterms)[i])
	    temp <- coxph.wtest(x$var[kk,kk], coef[kk])$test
	    print1 <- rbind(print1, c(NA, NA, NA,
				      temp, x$df[i], 1-pchisq(temp, 1)))
	    }
	else {
	    # Default: one row per coefficient, showing both se (from the
	    # full variance) and se2 (from the var2 component).
	    pname1 <- c(pname1, names(coef)[kk])
	    tempe<- (diag(x$var))[kk]
	    temp <- coef[kk]^2/ tempe
	    print1 <- rbind(print1, cbind(coef[kk], sqrt(tempe),
				      sqrt((diag(x$var2))[kk]), 
				      temp, 1, 1-pchisq(temp, 1)))
	    }
	}

    # Format out the NA's 
    temp <- cbind(format(print1[,1]), format(print1[,2]), 
		       format(print1[,3]),
		       format(round(print1[,4], 2)),
		       format(round(print1[,5], 2)),
		       format(signif(print1[,6], 2)))
    temp <- ifelse(is.na(print1), "", temp)
    dimnames(temp) <- list(substring(pname1,1, maxlabel), 
			     c("coef","se(coef)", "se2", "Chisq","DF","p"))
    prmatrix(temp, quote=FALSE)
	
    #
    # Write out the remainder of the info
    #
    cat("\nIterations:", x$iter[1], "outer,", x$iter[2], 
	"Newton-Raphson\n")
    if (length(print2)) {
#	cat("Penalized terms:\n")
	for (i in 1:length(print2)) cat("    ", print2[i], "\n")
	}

    logtest <- -2 * (x$loglik[1] - x$loglik[2])
    if (is.null(x$df)) df <- sum(!is.na(coef))
    else  df <- round(sum(x$df),2)
#    cat("\n")
    cat("Degrees of freedom for terms=", format(round(x$df,1)), "\n")
#    cat("Cox PL (initial,final) = ", format(round(x$loglik,2)),
#	"  Penalty = ", format(x$penalty), "\n")
    cat("Likelihood ratio test=", format(round(logtest, 2)), "  on ",
	df, " df,", " p=", format(1 - pchisq(logtest, df)),  sep="")
    omit <- x$na.action
    if (length(omit))
	cat("\n  n=", x$n, " (", naprint(omit), ")\n", sep="")
    else cat("  n=", x$n, "\n")
    invisible()
    }
# SCCS @(#)print.coxph.s	4.12 10/29/98
# Print method for coxph fits: echoes the call, prints the coefficient
# table (adding a robust-se column when naive.var is present), then the
# likelihood ratio test and sample size.
print.coxph <-
 function(x, digits=max(options()$digits - 4, 3), ...)
    {
    if (!is.null(cl<- x$call)) {
	cat("Call:\n")
	dput(cl)
	cat("\n")
	}
    if (!is.null(x$fail)) {
	cat(" Coxph failed.", x$fail, "\n")
	return()
	}
    # lower the print precision temporarily; restored on exit
    savedig <- options(digits = digits)
    on.exit(options(savedig))

    coef <- x$coef
    se <- sqrt(diag(x$var))
    # scalar test: use short-circuit || rather than the elementwise |
    if(is.null(coef) || is.null(se))
        stop("Input is not valid")

    if (is.null(x$naive.var)) {
	tmp <- cbind(coef, exp(coef), se, coef/se,
	       signif(1 - pchisq((coef/ se)^2, 1), digits -1))
	dimnames(tmp) <- list(names(coef), c("coef", "exp(coef)",
	    "se(coef)", "z", "p"))
	}
    else {
	# robust variance present: show the naive se as well, and base the
	# z statistic on the robust one
	nse <- sqrt(diag(x$naive.var))
	tmp <- cbind(coef, exp(coef), nse, se, coef/se,
	       signif(1 - pchisq((coef/se)^2, 1), digits -1))
	dimnames(tmp) <- list(names(coef), c("coef", "exp(coef)",
	    "se(coef)", "robust se", "z", "p"))
	}
    cat("\n")
    prmatrix(tmp)

    logtest <- -2 * (x$loglik[1] - x$loglik[2])
    # df from the penalized fit when present, otherwise the count of
    # estimable coefficients
    if (is.null(x$df)) df <- sum(!is.na(coef))
    else  df <- round(sum(x$df),2)
    cat("\n")
    cat("Likelihood ratio test=", format(round(logtest, 2)), "  on ",
	df, " df,", " p=", format(1 - pchisq(logtest, df)),  sep="")
    omit <- x$na.action
    if (length(omit))
	cat("  n=", x$n, " (", naprint(omit), ")\n", sep="")
    else cat("  n=", x$n, "\n")
    if (length(x$icc))
	cat("   number of clusters=", x$icc[1],
	    "    ICC=", format(x$icc[2:3]), "\n")
    invisible()
    }
#SCCS @(#)print.summary.survfit.s	4.7 07/09/00
# Print method for summary.survfit objects: assembles a matrix of
# time / n.risk / n.event / survival (plus se and confidence limits when
# present) and prints it, one stratum at a time when strata exist.
print.summary.survfit <- function(x, 
				  digits = max(options()$digits - 4, 3), ...) {
    savedig <- options(digits=digits)
    on.exit(options(savedig))

    if (!is.null(cl<- x$call)) {
	cat("Call: ")
	dput(cl)
	cat("\n")
	}

    omit <- x$na.action
    if (length(omit)) 
	    cat(naprint(omit), "\n")
    # The column layout differs for simple right censoring versus
    # counting-process data (the latter also reports entries and censored
    # exits).  NOTE(review): 'mat' is only created for these two cases;
    # any other type would fail below -- presumably it cannot occur.
    if (x$type == 'right' || is.null(x$n.entered)) {
	mat <- cbind(x$time, x$n.risk, x$n.event, x$surv)
	cnames <- c("time", "n.risk", "n.event")
        }

    else if (x$type == 'counting') {
	mat <- cbind(x$time, x$n.risk, x$n.event, x$n.entered,
		     x$n.exit.censored, x$surv)
	cnames <- c("time", "n.risk", "n.event", 
		    "n.entered", "n.censored")
        }
    # one survival column per curve
    if (is.matrix(x$surv)) ncurve <- ncol(x$surv)
    else	           ncurve <- 1
    if (ncurve==1) {                 #only 1 curve
	cnames <- c(cnames, "survival")
	if (!is.null(x$std.err)) {
	    if (is.null(x$lower)) {
		mat <- cbind(mat, x$std.err)
		cnames <- c(cnames, "std.err")
	        }
	    else {
		mat <- cbind(mat, x$std.err, x$lower, x$upper)
		cnames <- c(cnames, 'std.err',
			    paste("lower ", x$conf.int*100, "% CI", sep=''),
			    paste("upper ", x$conf.int*100, "% CI", sep=''))
	        }	
	    }
        }
    else cnames <- c(cnames, paste("survival", seq(ncurve), sep=''))

    # restrict the printout to times at or after new.start, when given
    if (!is.null(x$new.start)) {
	mat.keep <- mat[,1] >= x$new.start
	mat <- mat[mat.keep,,drop=FALSE]
	if (is.null(dim(mat)))
		stop(paste("No information available using new.start =", x$new.start, "."))
        }
    if (!is.matrix(mat)) mat <- matrix(mat, nrow=1)
    if (!is.null(mat)) {
	dimnames(mat) <- list(NULL, cnames)
	if (is.null(x$strata))
		prmatrix(mat, rowlab=rep("", nrow(mat)))
	else  { #print it out one strata at a time
	    # times.strata (one entry per printed row) takes precedence
	    # over the per-curve strata vector
	    if (!is.null(x$times.strata))
		    strata <- x$times.strata
	    else
		    strata <- x$strata
	   
	    if (!is.null(x$new.start))
		    strata <- strata[mat.keep]
	    for (i in levels(strata)) {
		who <- (strata==i)
		cat("               ", i, "\n")
		if (sum(who) ==1)
			print(mat[who,])
	        else
		    prmatrix(mat[who,], rowlab=rep("", sum(who)))

		cat("\n")
 	        }
	    }
        }
    else 
	stop("There are no events to print.  Please use the option ",
	    "censored=TRUE with the summary function to see the censored ",
	    "observations.")
    invisible(x)
    }














# SCCS @(#)print.summary.survreg.s	4.14 02/11/99
# Print method for summary.survreg objects: call, coefficient table,
# scale(s), log-likelihoods with a chi-square test, iteration count,
# sample size, and (when present) the coefficient correlation matrix.
print.summary.survreg <- function(x, digits = max(options()$digits - 4, 3),
				  ...) {
    correl <- x$correl
    n <- x$n

    if(is.null(digits))
        digits <- options()$digits
    cat("\nCall:\n")
    dput(x$call)

    print(x$table, digits = digits)
    # Three cases: scale fixed by the user, a single estimated scale, or
    # one scale per stratum.
    if (nrow(x$var)==length(x$coefficients)) 
	    cat("\nScale fixed at",format(x$scale, digits=digits),"\n") 
    else if (length(x$scale)==1) 
	    cat ("\nScale=", format(x$scale, digits=digits), "\n")
    else {
	cat("\nScale:\n")
	print(x$scale, digits=digits, ...)
	}

    cat("\n", x$parms, "\n", sep='')
    df  <- sum(x$df) - x$idf   # The sum is for penalized models
    cat("Loglik(model)=", format(round(x$loglik[2],1)),
	"  Loglik(intercept only)=", format(round(x$loglik[1],1)))
    if (df > 0)
	    cat("\n\tChisq=", format(round(x$chi,2)), "on", round(df,1),
		"degrees of freedom, p=", 
		format(signif(1-pchisq(x$chi, df),2)), "\n")
    else cat("\n")
    cat("Number of Newton-Raphson Iterations:", format(trunc(x$iter)),
        "\n")
    omit <- x$na.action
    if (length(omit))
	cat("n=", x$n, " (", naprint(omit), ")\n", sep="")
    else cat("n=", x$n, "\n")

    if(!is.null(correl)) {
        # Show only the lower triangle of the correlation matrix,
        # blanking the remainder.
        p <- dim(correl)[2]
        if(p > 1) {
            cat("\nCorrelation of Coefficients:\n")
            ll <- lower.tri(correl)
            correl[ll] <- format(round(correl[ll], digits=digits))
            correl[!ll] <- ""
            print(correl[-1,  - p, drop = FALSE], quote = FALSE)
            }
        }
    cat("\n")
    invisible(NULL)
    }
#SCCS 12/29/97 @(#)print.survdiff.s	4.11
# Print method for survdiff objects (log-rank family tests).
# One group: observed vs expected counts with a signed z and p-value.
# Several groups: a per-group table of N / observed / expected /
# (O-E)^2/E / (O-E)^2/V plus the overall chi-square test.
print.survdiff <- function(x, digits = max(options()$digits - 4, 3), ...) {

    saveopt <-options(digits=digits)
    on.exit(options(saveopt))

    if (!inherits(x, 'survdiff'))
	stop("Object is not the result of survdiff")
    if (!is.null(cl<- x$call)) {
	cat("Call:\n")
	dput(cl)
	cat("\n")
	}

    omit <- x$na.action
    if (length(omit)) cat("n=", sum(x$n), ", ", naprint(omit),
					  ".\n\n", sep='')

    if (length(x$n)==1)  {
	# One-sample test: the sign of z shows the direction of the
	# departure from expectation.
	z <- sign(x$exp - x$obs) * sqrt(x$chisq)
	temp <- c(x$obs, x$exp, z, signif(1-pchisq(x$chisq, 1),digits))
	names(temp) <- c("Observed", "Expected", "Z", "p")
	print(temp)
	}
    else {
	# Matrix obs/exp come from stratified tests: collapse over strata.
	if (is.matrix(x$obs)){
	    otmp <- apply(x$obs,1,sum)
	    etmp <- apply(x$exp,1,sum)
	    }
	else {
	    otmp <- x$obs
	    etmp <- x$exp
	    }
	# df = number of groups with a nonzero expected count, minus one
	df <- (sum(1*(etmp>0))) -1
	temp <- cbind(x$n, otmp, etmp, ((otmp-etmp)^2)/ etmp,
					 ((otmp-etmp)^2)/ diag(x$var))
	dimnames(temp) <- list(names(x$n), c("N", "Observed", "Expected",
				  "(O-E)^2/E", "(O-E)^2/V"))
	print(temp)
	cat("\n Chisq=", format(round(x$chisq,1)),
		 " on", df, "degrees of freedom, p=",
		 format(signif(1-pchisq(x$chisq, df),digits)), "\n")
       }
    invisible(x)
    }
#SCCS @(#)print.survexp.s	4.12 12/29/97
# Print method for expected-survival curves from survexp.  Without strata
# the result is one matrix; with strata each level is printed separately,
# since time points and n's can differ.  The 'naprint' argument is a
# logical flag (TRUE keeps rows that are mostly missing); it shadows the
# stats::naprint function by name only -- R's function-call lookup skips
# the local logical and still finds the function used below.
print.survexp <- function(x, scale=1, digits = max(options()$digits - 4, 3), naprint=FALSE, ...) {
    if (!inherits(x, 'survexp'))
	    stop("Invalid data")
    savedig <- options(digits=digits)
    on.exit(options(savedig))

    if (!is.null(cl<- x$call)) {
	cat("Call:\n")
	dput(cl)
	cat("\n")
	}

    if (!is.null(x$summ)) cat(x$summ)
    omit <- x$na.action
    if (length(omit))
	cat(naprint(omit), "\n")
    else cat("\n")

    if (is.null(x$strata))  { #print it as a matrix
	mat <- cbind(x$time/scale, x$n.risk, x$surv, x$std.err)
	if (!naprint) {
	    # drop rows where nearly all of the data columns are missing
	    miss <- (is.na(mat)) %*% rep(1,ncol(mat))
	    mat <- mat[miss<(ncol(mat)-2),,drop=FALSE]
	    }
	if (is.matrix(x$surv)) cname <- dimnames(x$surv)[[2]]
	else                     cname <- "survival"
	if (!is.null(x$std.err))
	      cname <- c(cname, paste("se(", cname, ")", sep=''))
	prmatrix(mat, rowlab=rep("", nrow(mat)),
		   collab=c("Time", "n.risk", cname))
	}
    else  { #print it out one strata at a time, since n's differ
	if (is.null(x$std.err)) tname <- 'survival'
	else                      tname <- c('survival', 'se(surv)')
	nstrat <- length(x$strata)
	levs <- names(x$strata)
	if (nrow(x$surv)==1) {
	    # a single time point: one row per stratum
	    mat <- cbind(c(x$n.risk), c(x$surv), c(x$std.err*x$surv))
	    dimnames(mat) <- list(levs, c("n.risk", tname))
	    cat(" Survival at time", x$time, "\n")
	    prmatrix(mat)
	    }
	else {
	    for (i in 1:nstrat) {
		cat("       ", levs[i], "\n")
		mat <- cbind(x$time/scale, x$n.risk[,i], x$surv[,i])
		if (!is.null(x$std.err)) mat<- cbind(mat,
			   x$std.err[,i] * x$surv[,i])
		if (!naprint) mat <- mat[!is.na(mat[,3]),,drop=FALSE]
		prmatrix(mat, rowlab=rep("",nrow(mat)),
				collab=c("Time", "n.risk", tname))
		cat("\n")
		}
	    }
	}
    invisible(x)
    }
#SCCS @(#)print.survfit.s	4.19 07/09/00
# Print method for survfit objects: for each curve (and stratum) report n,
# the number of events, the restricted mean survival time (rmean) with its
# standard error, and the median with confidence limits when available.
# show.rmean=FALSE drops the rmean columns from the display.
print.survfit <- function(x, scale=1, 
			  digits = max(options()$digits - 4, 3),
                          print.n=getOption("survfit.print.n"),show.rmean=getOption("survfit.print.mean"),...) {

    ##<TSL> different definitions of N....
    # "none" = NA, "start" = at-risk count at the first time,
    # "records" = number of input records, "max" = max at-risk count.
    print.n<-match.arg(print.n,c("none","start","records","max"))

    if (!is.null(cl<- x$call)) {
	cat("Call: ")
	dput(cl)
	cat("\n")
    }	
    omit <- x$na.action
    if (length(omit)) cat("  ", naprint(omit), "\n")

    savedig <- options(digits=digits)
    on.exit(options(savedig))
    # pfun builds one summary row (or one per column of surv): n, events,
    # rmean, se(rmean), median, and optionally CI bounds for the median.
    pfun <- function(nused, stime, surv, n.risk, n.event, lower, upper) {
        ##compute the mean, median, se(mean), and ci(median)
	# minmin: smallest time at which the curve reaches .5 or below;
	# when the curve sits exactly at .5 the median is the average of
	# the two relevant times.  Warnings from min/max on empty sets are
	# suppressed for the duration of the call.
	minmin <- function(y, xx) {
            ww<-getOption("warn")
            on.exit(options(warn=ww))
            options(warn=-1)
            if (any(!is.na(y) & y==.5)) {	
		if (any(!is.na(y) & y <.5))
                    .5*(min(xx[!is.na(y) & y==.5]) + min(xx[!is.na(y) & y<.5]))
		else
                    .5*(min(xx[!is.na(y) & y==.5]) + max(xx[!is.na(y) & y==.5]))
            }
            else   min(xx[!is.na(y) & y<=.5])
        }
        
	min.stime <- min(stime)
	min.time <- min(0, min.stime)
	n <- length(stime)
	# hh: per-time variance increments (Greenwood-style) used for
	# se(rmean); the final increment is 0 by convention.
	hh <- c(ifelse((n.risk[-n]-n.event[-n])==0, 0, 
		       n.event[-n]/(n.risk[-n]*(n.risk[-n]-n.event[-n]))),0)
	ndead<- sum(n.event)
	dif.time <- c(diff(c(min.time, stime)), 0)
	if (is.matrix(surv)) {
	    # several curves: the rmean is the area under each column
	    n <- nrow(surv)
	    mean <- dif.time * rbind(1, surv)
	    if (n==1)
                temp <- mean[2,,drop=FALSE]
	    else
                temp <- (apply(mean[(n+1):2,,drop=FALSE], 2, cumsum))[n:1,,drop=FALSE]
	    varmean <- c(hh %*% temp^2)
	    med <- apply(surv, 2, minmin, stime)
	    #nused <- as.list(nused)
	    names(nused)<-NULL
	    if (!is.null(upper)) {
		upper <- apply(upper, 2, minmin, stime)
		lower <- apply(lower, 2, minmin, stime)
		cbind(nused, ndead, apply(mean, 2, sum),
		      sqrt(varmean), med, lower, upper)
	        }
	    else {
		cbind(nused, ndead, apply(mean, 2, sum),
		      sqrt(varmean), med)
	        }
	    }
	else {
	    # single curve
	    mean <- dif.time*c(1, surv)
	    varmean <- sum(rev(cumsum(rev(mean))^2)[-1] * hh)
	    med <- minmin(surv, stime)
	    if (!is.null(upper)) {
		upper <- minmin(upper, stime)
		lower <- minmin(lower, stime)
		c(nused, ndead, sum(mean), sqrt(varmean), med, lower, upper)
	        }
	    else
		    c(nused, ndead, sum(mean), sqrt(varmean), med)
	    }
    }

    stime <- x$time/scale
    surv <- x$surv
    plab <- c("n", "events", "rmean", "se(rmean)", "median")
    if (!is.null(x$conf.int))
	    plab2<- paste(x$conf.int, c("LCL", "UCL"), sep='')

    #Four cases: strata Y/N  by  ncol(surv)>1 Y/N
    #  Repeat the code, with minor variations, for each one
    if (is.null(x$strata)) {
        nsubjects<-switch(print.n,none=NA,
                          start=x$n.risk[1],
                          records=x$n,
                          max=max(x$n.risk))
        ##x1 <- pfun(x$n, stime, surv, x$n.risk, x$n.event, x$lower, x$upper)
        x1 <- pfun(nsubjects, stime, surv, x$n.risk, x$n.event, x$lower, x$upper)
	if (is.matrix(x1)) {
	    if (is.null(x$lower))
		    dimnames(x1) <- list(NULL, plab)
	    else
		    dimnames(x1) <- list(NULL, c(plab, plab2))
	    }
	else {
	    if (is.null(x$lower))
		    names(x1) <- plab
	    else
		    names(x1) <- c(plab, plab2)
 	    }
        if (show.rmean)
            print(x1)
        else
            print(x1[,!(colnames(x1) %in% c("rmean","se(rmean)"))])
        }
    else {   #strata case
	nstrat <- length(x$strata)
        # stemp maps each printed time point to its stratum index
        if (is.null(x$ntimes.strata))
		stemp <- rep(1:nstrat,x$strata)
	else stemp <- rep(1:nstrat,x$ntimes.strata)
	x1 <- NULL
	if (is.null(x$strata.all))
            strata.var <- x$strata
	else
            strata.var <- x$strata.all

	# summarize each stratum in turn and stack the rows
 	for (i in unique(stemp)) {
	    who <- (stemp==i)
            ##different defn's of n
            nsubjects<-switch(print.n,none=NA,
                              start=x$n.risk[who][1],
                              records=strata.var[i],
                              max=max(x$n.risk[who]))
	    if (is.matrix(surv)) {
		temp <- pfun(nsubjects, stime[who], surv[who,,drop=FALSE],
			  x$n.risk[who], x$n.event[who],
			  x$lower[who,,drop=FALSE], x$upper[who,,drop=FALSE])
		x1 <- rbind(x1, temp)
	        }
	    else  {
		temp <- pfun(nsubjects, stime[who], surv[who], 
			     x$n.risk[who], x$n.event[who], x$lower[who], 
			     x$upper[who])
		x1 <- rbind(x1, temp)
	        }
	    }

	temp <- names(x$strata)
	if (nrow(x1) > length(temp)) {
	    # multi-column curves contribute several rows per stratum
	    nrep <- nrow(x1)/length(temp)
	    temp <- rep(temp, rep(nrep, length(temp)))
	    }

	if (is.null(x$lower))
		dimnames(x1) <- list(temp, plab)
	else
		dimnames(x1) <- list(temp, c(plab, plab2))

	if (show.rmean)
            print(x1)
        else
            print(x1[,!(colnames(x1) %in% c("rmean","se(rmean)"))])
        
    }
invisible(x)
}






# SCCS @(#)print.survreg.penal.s	1.1 11/25/98
# Print method for penalized parametric survival fits.  Structure mirrors
# print.coxph.penal: build the coefficient table one term at a time, using
# a term-specific print function when the fitter supplied one.
print.survreg.penal <-  function(x, terms=FALSE, maxlabel=25,
			       digits=max(options()$digits - 4, 3), ...) {
    if (!inherits(x, 'survreg.penal')) stop("Invalid object")

    if (!is.null(x$call)) {
	cat("Call:\n")
	dput(x$call) 
	cat("\n")
	}
    if (!is.null(x$fail)) {
	cat(" Survreg failed.", x$fail, "\n")
	return()
	}
    # reduce print precision for this call only
    savedig <- options(digits = digits)
    on.exit(options(savedig))

    coef <- x$coef
    if (length(coef)==0)
	    stop("Penalized fits must have an intercept!")

    #
    # Map terms to special print functions, and the list of iteration histories
    #
    # pterms codes each term: 0 = ordinary, >0 = penalized (2 = sparse).
    # print.map[i] holds the index of term i's print function, 0 if none.
    pterms <- x$pterms
    nterms <- length(pterms)
    npenal <- sum(pterms>0)
    print.map <- rep(0,nterms)
    if (!is.null(x$printfun)) {
	temp <- unlist(lapply(x$printfun, is.null))  #which ones are missing
	print.map[pterms>0] <- (1:npenal) * (!temp)
	}

    # Tedious, but build up the coef matrix a term at a time
    print1 <- NULL
    pname1 <- NULL
    # assign2, when present, maps terms to coefficient positions for
    # penalized fits; otherwise use the standard assign list.
    if (is.null(x$assign2)) alist <- x$assign
    else alist <- x$assign2

    print2 <- NULL
    for (i in 1:nterms) {
	kk <- alist[[i]]
	if (print.map[i] >0) {
	    # this term has its own print function; for a sparse term the
	    # coefficients are the frailties, and the empty third argument
	    # (no var2) is deliberate
	    j <- print.map[i]	
	    if (pterms[i]==2) 
		 temp <- (x$printfun[[j]])(x$frail, x$fvar, ,x$df[i], 
					   x$history[[j]])
	    else temp <- (x$printfun[[j]])(coef[kk], x$var[kk,kk], 
					   x$var2[kk,kk], 
					   x$df[i], x$history[[j]])
	    print1 <- rbind(print1, temp$coef)
	    if (is.matrix(temp$coef)) {
		xx <- dimnames(temp$coef)[[1]]
		if (is.null(xx))
			xx <- rep(names(pterms)[i], nrow(temp$coef))
		else    xx <- paste(names(pterms)[i], xx, sep=', ')
		pname1 <- c(pname1, xx)
		}
	    else  pname1 <- c(pname1, names(pterms)[i])
	    print2 <- c(print2, temp$history)
	    }

	else if (terms && length(kk)>1) {
	    # terms=TRUE: collapse a multi-coefficient term into a single
	    # Wald chi-square line
	    pname1 <- c(pname1, names(pterms)[i])
	    temp <- coxph.wtest(x$var[kk,kk], coef[kk])$test
	    print1 <- rbind(print1, c(NA, NA, NA,
				      temp, x$df[i], 1-pchisq(temp, 1)))
	    }
	else {
	    # default: one row per coefficient with both se and se2
	    pname1 <- c(pname1, names(coef)[kk])
	    tempe<- (diag(x$var))[kk]
	    temp <- coef[kk]^2/ tempe
	    print1 <- rbind(print1, cbind(coef[kk], sqrt(tempe),
				      sqrt((diag(x$var2))[kk]), 
				      temp, 1, 1-pchisq(temp, 1)))
	    }
	}

    # Format out the NA's 
    temp <- cbind(format(print1[,1]), format(print1[,2]), 
		       format(print1[,3]),
		       format(round(print1[,4], 2)),
		       format(round(print1[,5], 2)),
		       format(signif(print1[,6], 2)))
    temp <- ifelse(is.na(print1), "", temp)
    dimnames(temp) <- list(substring(pname1,1, maxlabel), 
			     c("coef","se(coef)", "se2", "Chisq","DF","p"))
    prmatrix(temp, quote=FALSE)
	
    #
    # Write out the remainder of the info
    #
    # scale fixed iff var has no extra rows beyond the coefficients
    if (nrow(x$var)==length(coef)) 
	    cat("\nScale fixed at",format(x$scale),"\n") 
    else if (length(x$scale)==1) cat ("\nScale=", format(x$scale), "\n")
    else {
	cat("\nScale:\n")
	print(x$scale, ...)
	}

    cat("\nIterations:", x$iter[1], "outer,", x$iter[2], 
	"Newton-Raphson\n")
    if (length(print2)) {
#	cat("Penalized terms:\n")
	for (i in 1:length(print2)) cat("    ", print2[i], "\n")
	}

    logtest <- -2 * (x$loglik[1] - x$loglik[2])
    df <- sum(x$df) - x$idf
#    cat("\n")
    cat("Degrees of freedom for terms=", format(round(x$df,1)), "\n")
#    cat("Loglik (initial,final) = ", format(round(x$loglik,2)),
#	"  Penalty = ", format(x$penalty), "\n")
    cat("Likelihood ratio test=", format(round(logtest, 2)), "  on ",
	round(df,1), " df,", " p=", format(1 - pchisq(logtest, df)),  sep="")

    n <- length(x$linear.predictors)
    omit <- x$na.action
    if (length(omit))
	cat("\n  n=", n, " (", naprint(omit), ")\n", sep="")
    else cat("  n=", n, "\n")
    invisible()
    }
#SCCS  @(#)print.survreg.s	4.10 11/30/98
# Print method for survreg fits: shows the call, the coefficients, the
# scale parameter(s), the log-likelihoods with a chi-square test against
# the intercept-only model, and the sample size.  Returns x invisibly.
print.survreg <- function(x, ...)
{
    if(!is.null(cl <- x$call)) {
        cat("Call:\n")
        dput(cl)
        }
    # A non-null 'fail' component means the fit did not succeed
    if (!is.null(x$fail)) {
	cat(" Survreg failed.", x$fail, "\n")
	return(invisible(x))
	}
    coef <- x$coef
    if(any(nas <- is.na(coef))) {
	# NA coefficients arise from singular X; label them if unnamed
	if(is.null(names(coef))) names(coef) <- paste("b", 1:length(coef), sep = "")
        cat("\nCoefficients: (", sum(nas), 
            " not defined because of singularities)\n", sep = "")
        }
    else cat("\nCoefficients:\n")
    print(coef, ...)
    
    # When var has exactly one row per coefficient, no scale was estimated
    if (nrow(x$var)==length(coef)) 
	    cat("\nScale fixed at",format(x$scale),"\n") 
    else if (length(x$scale)==1) cat ("\nScale=", format(x$scale), "\n")
    else {
	# multiple scales, e.g. one per stratum
	cat("\nScale:\n")
	print(x$scale, ...)
	}


    nobs <- length(x$linear)
    chi <- 2*diff(x$loglik)
    df  <- sum(x$df) - x$idf   # The sum is for penalized models
    cat("\nLoglik(model)=", format(round(x$loglik[2],1)),
	"  Loglik(intercept only)=", format(round(x$loglik[1],1)))
    if (df > 0)
	    cat("\n\tChisq=", format(round(chi,2)), "on", round(df,1),
		"degrees of freedom, p=", 
		format(signif(1-pchisq(chi, df),2)), "\n")
    else cat("\n")

    # naprint() reports how many observations were removed for missingness
    omit <- x$na.action
    if (length(omit))
	cat("n=", nobs, " (", naprint(omit), ")\n", sep="")
    else cat("n=", nobs, "\n")
    invisible(x)
    }
# SCCS @(#)pspline.s	1.4 02/24/99
#
# the p-spline function for a Cox model
#
# Builds a B-spline basis for x on evenly spaced knots and returns it as a
# 'coxph.penalty' matrix.  The attached attributes define a quadratic
# penalty on the coefficients (second differences), with the smoothing
# parameter theta chosen by one of three methods:
#   'fixed' - theta supplied by the caller,
#   'df'    - solve for a target degrees of freedom,
#   'aic'   - optimize a corrected AIC.
pspline <- function(x, df=4, theta, nterm=2.5*df, degree=3, eps=0.1, 
		    method, ...) {
    ##require(splines)
    if (!missing(theta)) {
	method <- 'fixed'
	if (theta <=0 || theta >=1) stop("Invalid value for theta")
	}
    else if (df ==0 || (!missing(method) && method=='aic')) {
	method <- 'aic'
	nterm <- 15    #will be ok for up to 6-8 df
	if (missing(eps)) eps <- 1e-5
	}
    else {
	method <- 'df'
	if (df <=1) stop ('Too few degrees of freedom')
	}

    xname <- deparse(substitute(x))
    keepx <- !is.na(x)
    rx <- range(x[keepx])
    nterm <- round(nterm)
    if (nterm < 3) stop("Too few basis functions")
    # Equally spaced interior knots, padded by 'degree' extra knots on
    # each side as spline.des requires
    dx <- (rx[2] - rx[1])/nterm
    knots <- c(rx[1] + dx*((-degree):(nterm-1)), rx[2]+ dx*(0:degree))
    if (all(keepx)) newx <- spline.des(knots, x, degree+1)$design
    else {
	# build the basis on non-missing values only, pad with NA rows
	temp <- spline.des(knots, x[keepx], degree+1)$design
	newx <- matrix(NA, length(x), ncol(temp))
	newx[keepx,] <- temp
        }
    newx <- newx[,-1]              #redundant coefficient with lambda_0
    class(newx) <- 'coxph.penalty'
    nvar <- 1 + ncol(newx)   #should be nterm + degree
    # Penalty matrix t(D) %*% D, where D is the second-difference operator;
    # diff(x, 1, 2) takes lag-1 differences twice
    dmat <- diag(nvar)
    dmat <- apply(dmat, 2, diff, 1, 2) 
    dmat <- t(dmat) %*% dmat
    dmat <- dmat[-1,-1]                  # rows corresponding to the 0 coef
    xnames <-paste('ps(', xname, ')', 2:nvar, sep='')

    # Penalty, gradient and Hessian for a given theta in [0,1];
    # lambda = theta/(1-theta) is the usual smoothing parameter.  For
    # theta >= 1 a dummy decreasing penalty with flag=TRUE is returned.
    pfun <- function(coef, theta, n, dmat) {
	if (theta >=1) list(penalty= 100*(1-theta), flag=TRUE)
	else {
	    if (theta <= 0) lambda <- 0 
	    else lambda <- theta / (1-theta)
	    list(penalty= c(coef %*% dmat %*% coef) * lambda/2,
		 first  = c(dmat %*% coef) * lambda ,
		 second = c(dmat * lambda),
		 flag=FALSE
		 )
	    }
        }	

    # Summary lines for the print method: split the term's chi-square
    # into a linear part (regression of the basis centers on the coefs)
    # and the remaining non-linear part
    printfun <- function(coef, var, var2, df, history) {
	test1 <- coxph.wtest(var, coef)$test
	# cbase contains the centers of the basis functions
	#   do a weighted regression of these on the coefs to get a slope
	xmat <- cbind(1, cbase)
	xsig <- coxph.wtest(var, xmat)$solve   # V X , where V = g-inverse(var)
	# [X' V X]^{-1} X' V
	cmat <- coxph.wtest(t(xmat)%*% xsig, t(xsig))$solve[2,]  
        linear <- sum(cmat * coef)
	lvar1  <- c(cmat %*% var %*% cmat)
	lvar2  <- c(cmat %*% var2%*% cmat)
	test2 <- linear^2 / lvar1
	# the "max(.5, df-1)" below stops silly (small) p-values for a
	#  chisq of 0 on 0 df, when using AIC gives theta near 1
	cmat <- rbind(c(linear, sqrt(lvar1), sqrt(lvar2), 
			test2, 1, 1-pchisq(test2, 1)),
		      c(NA, NA, NA, test1-test2, df-1, 
			1-pchisq(test1-test2, max(.5,df-1))))
	dimnames(cmat) <- list(c("linear", "nonlin"), NULL)
	# report the final theta from the iteration history
	nn <- nrow(history$thetas)
	if (length(nn)) theta <- history$thetas[nn,1]
	else  theta <- history$theta
	list(coef=cmat, history=paste("Theta=", format(theta)))
	}
    # Line 2 below is a real sneaky thing, see notes.
    ## We don't need to be sneaky. We have lexical scope :)
    ## printfun[[6]] <- knots[2:nvar] + (rx[1] - knots[1])
    cbase<-knots[2:nvar] + (rx[1] - knots[1])	       
    if (method=='fixed') {
	# fixed theta: the control function converges immediately
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     pparm=dmat,
		     diag =FALSE,
		     cparm=list(theta=theta),
		     varname=xnames,
		     cfun = function(parms, iter, old)
			         list(theta=parms$theta, done=TRUE))
	}
    else if (method=='df') {
	# solve for the theta that yields the requested degrees of freedom
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     diag =FALSE,
		     cargs=('df'),
		     cparm=list(df=df, eps=eps, thetas=c(1,0),
		                dfs=c(1, nterm), guess=1 - df/nterm, ...),
		     pparm= dmat,
		     varname=xnames,
		     cfun = frailty.controldf)
	}

    else { # use AIC
	temp <- list(pfun=pfun,
		     printfun=printfun,
		     pparm=dmat,
		     diag =FALSE,
		     cargs = c('neff', 'df', 'plik'),
		     cparm=list(eps=eps, init=c(.5, .95), 
		                lower=0, upper=1, ...),
		     varname=xnames,
		     cfun = frailty.controlaic)
	}
    
    attributes(newx) <- c(attributes(newx), temp)
    newx
    }
#SCCS  @(#)pyears.s	5.4 02/19/99
# Person-years analysis.  Tabulates follow-up time (and, when a rate
# table is supplied, observed and expected event counts) over the grid
# of categories defined by the factor/tcut terms on the right-hand side
# of the formula.
pyears <- function(formula=formula(data), data=sys.frame(sys.parent()),
	weights, subset, na.action,
	ratetable=survexp.us, scale=365.25,  expect=c('event', 'pyears'),
	model=FALSE, x=FALSE, y=FALSE) {

    expect <- match.arg(expect)
    call <- match.call()
    m <- match.call(expand=FALSE)
    # keep only the arguments that model.frame understands
    m$ratetable <- m$model <- m$x <- m$y <- m$scale<- m$expect <- NULL

    Terms <- if(missing(data)) terms(formula, 'ratetable')
	     else              terms(formula, 'ratetable',data=data)
    if (any(attr(Terms, 'order') >1))
	    stop("Pyears cannot have interaction terms")
    m$formula <- Terms
    m[[1]] <- as.name("model.frame")
    m <- eval(m, parent.frame())

    Y <- model.extract(m, 'response')
    if (is.null(Y)) stop ("Follow-up time must appear in the formula")
    if (!is.Surv(Y)){
	# plain numeric response: total follow-up, or (start, stop) columns
	if (any(Y <0)) stop ("Negative follow up time")
	Y <- as.matrix(Y)
	if (ncol(Y) >2) stop("Y has too many columns")
	if (ncol(Y)==2 && any(Y[,2] <= Y[,1]))
	    stop("stop time must be > start time")
	}
    n <- nrow(Y)

    weights <- model.extract(m, 'weights')

    rate <- attr(Terms, "specials")$ratetable
    if (length(rate) >1 )
	stop ("Can have only 1 ratetable() call in a formula")
    else if (length(rate) == 0 && !missing(ratetable)) {
	# add a 'ratetable' call to the internal formula
        # The dummy function stops an annoying warning message "Looking for
        #  'formula' of mode function, ignored one of mode ..."
	xx <- function(x) formula(x)
    
	if(is.ratetable(ratetable))   varlist <- attr(ratetable, "dimid")
	else stop("Invalid rate table")

	ftemp <- deparse(substitute(formula))
	formula <- xx( paste( ftemp, "+ ratetable(",
			  paste( varlist, "=", varlist, collapse = ","), ")"))
	Terms <- if (missing(data)) terms(formula, "ratetable")
	         else               terms(formula, "ratetable", data = data)
	rate <- attr(Terms, "specials")$ratetable
	}

    if (length(rate)==1) {
	# match the ratetable() variables against the table's dimensions;
	# ovars holds the remaining (tabulation) variables
	ovars <- (dimnames(attr(Terms, 'factors'))[[1]])[-c(1, rate)]
	rtemp <- match.ratetable(m[,rate], ratetable)
	R <- rtemp$R
	if (!is.null(rtemp$call)) {#need to drop some dimensions from ratetable
	    ratetable <- eval(parse(text=rtemp$call))
	    }
	}
    else ovars <- (dimnames(attr(Terms, 'factors'))[[1]])[-1]

    # Now process the other (non-ratetable) variables
    if (length(ovars)==0)  {
	# no categories!
	X <- rep(1,n)
	ofac <- odim <- odims <- ocut <- 1
	}
    else {
	odim <- length(ovars)
	ocut <- NULL
	odims <- ofac <- double(odim)
	X <- matrix(0, n, odim)
	outdname <- vector("list", odim)
	for (i in 1:odim) {
	    temp <- m[[ovars[i]]]
	    ctemp <- class(temp)
	    if (!is.null(ctemp) && ctemp=='tcut') {
		# time-dependent category: remember its cutpoints, and
		# flag it as non-factor (ofac=0) for the C routine
		X[,i] <- temp
		temp2 <- attr(temp, 'cutpoints')
		odims[i] <- length(temp2) -1
		ocut <- c(ocut, temp2)
		ofac[i] <- 0
		outdname[[i]] <- attr(temp, 'labels')
		}
	    else {
		# ordinary variable: tabulate as a factor
		temp2 <- factor(temp)
		X[,i] <- temp2
		temp3 <- levels(temp2)
		odims[i] <- length(temp3)
		ofac[i] <- 1
		outdname[[i]] <- temp3
		}
	    }
	}

    # Now do the computations
    ocut <-c(ocut,0)   #just in case it were of length 0
    osize <- prod(odims)
    if (length(rate)) {  #include expected
	atts <- attributes(ratetable)
	cuts <- atts$cutpoints
	rfac <- atts$factor
	us.special <- (rfac >1)
	if (any(us.special)) {  #special handling for US pop tables
	    if (sum(us.special) >1)
		stop("Two columns marked for special handling as a US rate table")
	    #slide entry date so that it appears that they were born on Jan 1
	    cols <- match(c("age", "year"), atts$dimid)
	    if (any(is.na(cols))) stop("Ratetable does not have expected shape")
	    temp <- date.mdy(R[,cols[2]]-R[,cols[1]])
	    R[,cols[2]] <- R[,cols[2]] - mdy.date(temp$month, temp$day, 1960)
	    # Doctor up "cutpoints"
	    temp <- (1:length(rfac))[us.special]
	    nyear <- length(cuts[[temp]])
	    nint <- rfac[temp]       #intervals to interpolate over
	    cuts[[temp]] <- round(approx(nint*(1:nyear), cuts[[temp]],
					    nint:(nint*nyear))$y - .0001)
	    }

	# C routine: tabulate person-years, counts, events and expecteds
	temp <- .C("pyears1",
			as.integer(n),
			as.integer(ncol(Y)),
			as.integer(is.Surv(Y)),
			as.double(Y),
			as.integer(length(atts$dim)),
			as.integer(rfac),
			as.integer(atts$dim),
			as.double(unlist(cuts)),
			ratetable,
			as.double(R),
			as.integer(odim),
			as.integer(ofac),
			as.integer(odims),
			as.double(ocut),
			as.integer(expect=='event'),
			X,
			pyears=double(osize),
			pn    =double(osize),
			pcount=double(if(is.Surv(Y)) osize else 1),
			pexpect=double(osize),
			offtable=double(1),PACKAGE="survival")[17:21]
	}
    else {
	# no rate table: tabulate person-years and counts only
	temp <- .C('pyears2',
			as.integer(n),
			as.integer(ncol(Y)),
			as.integer(is.Surv(Y)),
			as.double(Y),
			as.integer(odim),
			as.integer(ofac),
			as.integer(odims),
			as.double(ocut),
			X,
			pyears=double(osize),
			pn    =double(osize),
			pcount=double(if(is.Surv(Y)) osize else 1),
			offtable=double(1),PACKAGE="survival") [10:13]
	}

    # Package the results; 'scale' converts time units (default days->years)
    if (prod(odims) ==1) {  #don't make it an array
	out <- list(call=call, pyears=temp$pyears/scale, n=temp$pn,
		    offtable=temp$offtable/scale)
	if (length(rate)) {
	    out$expected <- temp$pexpect
	    if (expect=='pyears') out$expected <- out$expected/scale
	    if (!is.null(rtemp$summ)) out$summary <- rtemp$summ
	    }
	if (is.Surv(Y)) out$event <- temp$pcount
	}
    else {
	out <- list(call = call,
		pyears= array(temp$pyears/scale, dim=odims, dimnames=outdname),
		n     = array(temp$pn,     dim=odims, dimnames=outdname),
		offtable = temp$offtable/scale)
	if (length(rate)) {
	    out$expected <- array(temp$pexpect, dim=odims, dimnames=outdname)
	    if (expect=='pyears') out$expected <- out$expected/scale
	    if (!is.null(rtemp$summ)) out$summary <- rtemp$summ
	    }
	if (is.Surv(Y))
		out$event <- array(temp$pcount, dim=odims, dimnames=outdname)
	}
    na.action <- attr(m, "na.action")
    if (length(na.action))  out$na.action <- na.action
    if (model) out$model <- m
    else {
	if (x) out$x <- cbind(X, R)
	if (y) out$y <- Y
	}
    class(out) <- 'pyears'
    out
    }

# SCCS @(#)ratetable.s	5.2 09/25/98
#
# 'specials' function for pyears: a stripped down version of
# as.matrix(data.frame(...)).  There is no function to create a rate
# table itself.  The result has class "ratetable2" only so that data
# frame subscripting works; see the methods below.
#
ratetable <- function(...) {
    dots <- list(...)
    k <- length(dots)
    lens <- vapply(dots, length, integer(1))
    n <- max(lens)
    levs <- vector("list", k)
    out <- matrix(0, n, k)
    dimnames(out) <- list(1:n, names(dots))
    for (j in seq_len(k)) {
        if (lens[j] == n) {
            # full-length column: store factor codes (levels remembered
            # in 'levs') or the numeric values directly
            v <- dots[[j]]
            if (!is.numeric(v)) v <- factor(v)
            if (is.factor(v)) {
                levs[[j]] <- levels(v)
                out[, j] <- c(v)
                }
            else out[, j] <- v
            }
        else if (lens[j] == 1) levs[j] <- dots[j]   # scalar: a constant
        else stop("All arguments to ratetable() must be the same length")
        }
    # constants are scalars recycled over n > 1 rows
    attr(out, "constants") <- (lens == 1) & (n > 1)
    attr(out, "levlist") <- levs
    class(out) <- "ratetable2"
    out
    }

# The two functions below should only be called internally, when missing
#   values cause model.frame to drop some rows
is.na.ratetable2 <- function(x) {
    # Strip everything but dim so x is treated as a bare matrix
    dd <- dim(x)
    attributes(x) <- list(dim = dd)
    # A row is "missing" when any column is NA: count NAs per row with a
    # matrix product, then test for a positive count
    na.count <- (1 * is.na(x)) %*% rep(1, dd[2])
    as.vector(na.count > 0)
    }
"[.ratetable2" <- function(x, rows, cols, drop=FALSE) {
    if (!missing(cols)) {
       stop("This should never be called!")
       }
    aa <- attributes(x)
    attributes(x) <- aa[c("dim", "dimnames")]
    y <- x[rows,,drop=FALSE]
    attr(y,'constants') <- aa$constants
    attr(y,'levlist')   <- aa$levlist
    class(y) <- 'ratetable2'
    y
    }

#
# Functions to manipulate rate tables
#

"[.ratetable" <- function(x, ..., drop=TRUE) {
    # Subscript a rate table like an array, keeping the rate-table
    # attributes (dimid, factor, cutpoints) consistent with whichever
    # dimensions survive the subscript.
    aa <- attributes(x)
    attributes(x) <- aa[c("dim", "dimnames")]   # subset as a plain array
    y <- NextMethod("[", drop=FALSE)
    newdim <- attr(y, 'dim')
    if (is.null(newdim)) stop("Invalid subscript")
    dropped <- (newdim==1)
    if (drop)  change <- (newdim!=aa$dim & !dropped)
    else       change <- (newdim!=aa$dim)

    if (any(change)) {  #dims that got smaller, but not dropped
	# subset each shrunken dimension's cutpoints to match the kept levels
	newcut <- aa$cutpoints
	for (i in (1:length(change))[change])
	    if (!is.null(newcut[[i]])) newcut[[i]] <-
		(newcut[[i]])[match(dimnames(y)[[i]], aa$dimnames[[i]])]
	aa$cutpoints <- newcut
	}
    if (drop && any(dropped)){
	if (all(dropped)) as.numeric(y)   #single element
	else {
	    #Note that we have to drop the summary function
	    attributes(y) <- list( dim = dim(y)[!dropped],
				   dimnames = dimnames(y)[!dropped],
				   dimid = aa$dimid[!dropped],
				   factor = aa$factor[!dropped],
				   cutpoints =aa$cutpoints[!dropped])
	    class(y) <- 'ratetable'
	    y
	    }
	}
    else {
	# nothing dropped: restore all of the original attributes
	aa$dim <- aa$dimnames <- NULL
	attributes(y) <- c(attributes(y), aa)
	y
	}
    }

is.na.ratetable  <- function(x) {
    # NA mask for a rate table: compute on the bare data, then shape the
    # logical result like the original table.
    flags <- is.na(as.vector(x))
    dim(flags) <- dim(x)
    dimnames(flags) <- dimnames(x)
    flags
    }

Math.ratetable <- function(x, ...) {
    # Group generic for math functions (log, exp, ...): strip the
    # rate-table attributes so the default array method does the work.
    attributes(x) <- attributes(x)[c("dim", "dimnames")]
    NextMethod(.Generic)
    }

Ops.ratetable <- function(e1, e2) {
    #just treat it as an array
    # Group generic for operators.  .Method has one element per operand;
    # it is "" when that operand is not a ratetable, so only ratetable
    # operands have their extra attributes stripped before dispatch.
    if (nchar(.Method[1])) attributes(e1) <- attributes(e1)[c("dim","dimnames")]
    if (nchar(.Method[2])) attributes(e2) <- attributes(e2)[c("dim","dimnames")]
    NextMethod(.Generic)
    }

as.matrix.ratetable <- function(x) {
    # Return the underlying array: keeping only dim and dimnames also
    # removes the class and the rate-specific attributes.
    bare <- attributes(x)[c("dim", "dimnames")]
    attributes(x) <- bare
    x
    }

print.ratetable <- function(x, ...)  {
    # Announce the table's dimension names, then print the data as a
    # plain array by stripping the rate-table attributes first.
    cat("Rate table with dimension(s):", attr(x, "dimid"), "\n")
    bare <- attributes(x)[c("dim", "dimnames")]
    attributes(x) <- bare
    NextMethod()
    }
#SCCS 04/14/92 @(#)residuals.coxph.null.s	4.2
# Residuals for a null (no covariate) Cox model.  Only martingale and
# deviance residuals are defined here; the other types need an X matrix,
# so they are refused with an error.
residuals.coxph.null <-
  function(object, type=c("martingale", "deviance", "score", "schoenfeld"),
           collapse=FALSE, weighted=FALSE, ...)
    {
    type <- match.arg(type)
    if (!(type %in% c("martingale", "deviance")))
        stop(paste("'", type, "' residuals are not defined for a null model",
                   sep = ""))
    # defer to residuals.coxph for the types that are defined
    NextMethod()
    }
# SCCS @(#)residuals.coxph.penal.s	1.2 10/31/98
# Residuals for penalized Cox models.  Mostly defers to residuals.coxph;
# the extra work handles sparse frailty terms, whose coefficients live in
# object$frail rather than in the usual X matrix.
residuals.coxph.penal <- function(object, 
            type=c("martingale", "deviance", "score", "schoenfeld",
			  "dfbeta", "dfbetas", "scaledsch","partial"),
	    collapse=FALSE, weighted=FALSE, ...) {
      
    type <- match.arg(type)
    # Are there any sparse terms, and if so do I need the X matrix?
    if (any(object$pterms==2) && !(type=='martingale' || type=='deviance')){
	# treat the sparse term as an offset term
	#  It gets picked up in the linear predictor, so all I need to
	#  do is "X" it out of the model so that it doesn't get picked up
	#  as a part of the X matrix and etc.
	# I know that the sparse term is a single column BTW
	#
	sparsename <- (names(object$pterms))[object$pterms==2]
	x <- object$x
	if (is.null(x)) {
	    # x was not stored with the fit: rebuild it (and y / strata
	    # too, if those are also missing)
	    temp <- coxph.getdata(object, y=TRUE, x=TRUE, strata=TRUE)
	    if (is.null(object$y)) object$y <- temp$y
	    if (is.null(object$strata)) object$strata <- temp$strata
	    x <- temp$x
	    }
	object$x <- x[, -match(sparsename, dimnames(x)[[2]]), drop=FALSE]
    
	# drop the sparse term from the terms object as well
	temp <- attr(object$terms, 'term.labels')
	object$terms <- object$terms[-match(sparsename, temp)]
	}
    NextMethod('residuals')
    }
#  SCCS @(#)residuals.coxph.s	5.4 09/07/00
# Residuals for Cox model fits: martingale, deviance, score, Schoenfeld,
# dfbeta(s), scaled Schoenfeld, and partial residuals.  Score-based types
# call the C routines coxscore/agscore; Schoenfeld types call coxscho.
residuals.coxph <-
  function(object, type=c("martingale", "deviance", "score", "schoenfeld",
			  "dfbeta", "dfbetas", "scaledsch","partial"),
	    collapse=FALSE, weighted=FALSE,...)
    {
    type <- match.arg(type)
    otype <- type   # remember the requested type; 'type' becomes the work type
    if (type=='dfbeta' || type=='dfbetas') {
	type <- 'score'
	if (missing(weighted))
            weighted <- TRUE  # different default
	}
    if (type=='scaledsch') type<-'schoenfeld'

    n <- length(object$residuals)
    rr <- object$residual
    y <- object$y
    x <- object$x
    # use the naive variance (penalized/robust fits) when present
    vv <- object$naive.var
    if (is.null(vv)) vv <- object$var
    weights <- object$weights
    if (is.null(weights)) weights <- rep(1,n)
    strat <- object$strata
    method <- object$method
    if (method=='exact' && (type=='score' || type=='schoenfeld'))
	stop(paste(type, 'residuals are not available for the exact method'))

    if (type == 'martingale')
        rr <- object$residual
    else if (type=="partial"){ ## add terms component later, after naresid()
        rr<-object$residual
    }
    else {
	# I need Y, and perhaps the X matrix (and strata)
	Terms <- object$terms
	if (!inherits(Terms, 'terms'))
		stop("invalid terms component of object")
	strats <- attr(Terms, "specials")$strata
	if (is.null(y)  ||  (is.null(x) && type!= 'deviance')) {
	    # not stored with the fit: reconstruct from the model frame
	    temp <- coxph.getdata(object, y=TRUE, x=TRUE, strata=TRUE)
	    y <- temp$y
	    x <- temp$x
	    if (length(strats)) strat <- temp$strata
	    }

	ny <- ncol(y)
	status <- y[,ny,drop=TRUE]

	if (type != 'deviance') {
	    # sort by (stratum,) time, deaths first at ties; newstrat marks
	    # the last observation of each stratum with a 1
	    nstrat <- as.numeric(strat)
	    nvar <- ncol(x)
	    if (is.null(strat)) {
		ord <- order(y[,ny-1], -status)
		newstrat <- rep(0,n)
		}
	    else {
		ord <- order(nstrat, y[,ny-1], -status)
		newstrat <- c(diff(as.numeric(nstrat[ord]))!=0 ,1)
		}
	    newstrat[n] <- 1

	    # sort the data
	    x <- x[ord,]
	    y <- y[ord,]
	    score <- exp(object$linear.predictor)[ord]
	    }
	}

    #
    # Now I have gotton the data that I need-- do the work
    #
    if (type=='schoenfeld') {
  	if (ny==2) {
	    # pad simple survival data with a start-time column, placed
	    # strictly before every observed time
 	    mintime <- min(y[,1])
 	    if (mintime < 0) y <- cbind(2*mintime -1, y)
 	    else             y <- cbind(-1,y)
 	    }
	temp <- .C("coxscho", n=as.integer(n),
			    as.integer(nvar),
			    as.double(y),
			    resid=  as.double(x),
			    as.double(score * weights[ord]),
			    as.integer(newstrat),
			    as.integer(method=='efron'),
			    double(3*nvar),PACKAGE="survival")

	deaths <- y[,3]==1

	if (nvar==1) rr <- temp$resid[deaths]
	else         rr <- matrix(temp$resid[deaths], ncol=nvar) #pick rows 
	if (weighted) rr <- rr * weights[deaths]

	if (length(strats)) attr(rr, "strata")  <- table((strat[ord])[deaths])
	time <- c(y[deaths,2])  # 'c' kills all of the attributes
	if (is.matrix(rr)) dimnames(rr)<- list(time, names(object$coef))
	else               names(rr) <- time

	if (otype=='scaledsch') {
	    # Grambsch-Therneau scaling: rr * V * (number of deaths) + coef
	    ndead <- sum(deaths)
	    coef <- ifelse(is.na(object$coef), 0, object$coef)
	    if (nvar==1) rr <- rr*vv *ndead + coef
	    else         rr <- rr %*%vv * ndead +
						outer(rep(1,nrow(rr)),coef)
	    }
	return(rr)
	}

    if (type=='score') {
	# coxscore handles simple survival (ny==2), agscore handles
	# (start, stop] counting-process data
	if (ny==2) {
	    resid <- .C("coxscore", as.integer(n),
				as.integer(nvar),
				as.double(y),
				x=as.double(x),
				as.integer(newstrat),
				as.double(score),
				as.double(weights[ord]),
				as.integer(method=='efron'),
				resid= double(n*nvar),
				double(2*nvar),PACKAGE="survival")$resid
	    }
	else {
	    resid<- .C("agscore",
				as.integer(n),
				as.integer(nvar),
				as.double(y),
				as.double(x),
				as.integer(newstrat),
				as.double(score),
				as.double(weights[ord]),
				as.integer(method=='efron'),
				resid=double(n*nvar),
				double(nvar*6),PACKAGE="survival")$resid
	    }
	# un-sort the residuals back to the original data order
	if (nvar >1) {
	    rr <- matrix(0, n, nvar)
	    rr[ord,] <- matrix(resid, ncol=nvar)
	    dimnames(rr) <- list(names(object$resid), names(object$coef))
	    }
	else rr[ord] <- resid

	if      (otype=='dfbeta') {
	    if (is.matrix(rr)) rr <- rr %*% vv
	    else               rr <- rr * vv
	    }
	else if (otype=='dfbetas') {
	    if (is.matrix(rr))  rr <- (rr %*% vv) %*% diag(sqrt(1/diag(vv)))
	    else                rr <- rr * sqrt(vv)
	    }
	}

    #
    # Multiply up by case weights (which will be 1 for unweighted)
    #
    if (weighted) rr <- rr * weights

    #Expand out the missing values in the result
    if (!is.null(object$na.action)) {
	rr <- naresid(object$na.action, rr)
   	if (is.matrix(rr)) n <- nrow(rr)
	else               n <- length(rr)
	if (type=='deviance') status <- naresid(object$na.action, status)
	}
    if (type=="partial") ## predict already uses naresid()
      rr<-rr+predict(object,type="terms")

    # Collapse if desired
    if (!missing(collapse)) {
	if (length(collapse) !=n) stop("Wrong length for 'collapse'")
	rr <- rowsum(rr, collapse)
  	if (type=='deviance') status <- rowsum(status, collapse)
	}

    # Deviance residuals are computed after collapsing occurs
    if (type=='deviance')
	sign(rr) *sqrt(-2* (rr+
			      ifelse(status==0, 0, status*log(status-rr))))
    else rr
    }
# SCCS @(#)residuals.survreg.penal.s	1.1 11/30/98
# This routine just stops disastrous arithmetic for models with sparse
# terms.  A placeholder until the proper sparse terms actions are inserted.
residuals.survreg.penal <- function(object, ...) {
    # Sparse frailty terms (pterms == 2) are folded into the linear
    # predictor rather than the X matrix, so the standard survreg residual
    # formulas would be wrong; refuse until proper support exists.
    pterms <- object$pterms
    if (any(pterms==2))
	    stop("Residuals not available for sparse models")  # fixed typo: was "Residualss"
    NextMethod('residuals')
    }
# SCCS @(#)residuals.survreg.s	4.12 02/06/99
# 
#  Residuals for survreg objects
#
# Supported types: response, deviance, working, dfbeta(s), likelihood
# displacement diagnostics (ldcase, ldresp, ldshape), and the raw matrix
# of log-likelihood derivatives.  rsigma=TRUE includes the scale
# parameter(s) in the variance used for the df* and ld* types.
residuals.survreg <- function(object, type=c('response', 'deviance',
		      'dfbeta', 'dfbetas', 'working', 'ldcase',
		      'ldresp', 'ldshape', 'matrix'), 
		      rsigma =TRUE, collapse=FALSE, weighted=FALSE, ...) {
    type <-match.arg(type)
    n <- length(object$linear.predictors)
    Terms <- object$terms
    if(!inherits(Terms, "terms"))
	    stop("invalid terms component of  object")

    strata <- attr(Terms, 'specials')$strata
    coef <- object$coefficients
    intercept <- attr(Terms, "intercept")
    response  <- attr(Terms, "response")
    weights <- object$weights
    if (is.null(weights)) weighted <- FALSE

    #
    # What do I need to do the computations?
    #
    if (type=='response' || type=='deviance') need.x <-FALSE
    else need.x <- TRUE

    if (type=='ldshape' || type=='ldcase') need.strata <- TRUE
    else need.strata <- FALSE
    
    need.y <- TRUE

    # grab what I need
    if (need.strata || (need.y && is.null(object$y)) || 
	               (need.x && is.null(object$x)) ) {
	# I need the model frame
	if (is.null(object$model)) m <- model.frame(object)
	else m <- object$model
	}

    
    if (need.strata && !is.null(strata)) {
	# strip the strata term out of Terms; each stratum has its own scale
	temp <- untangle.specials(Terms, 'strata', 1)
	Terms2 <- Terms[-temp$terms]
	if (length(temp$vars)==1) strata.keep <- m[[temp$vars]]
	else strata.keep <- strata(m[,temp$vars], shortlabel=TRUE)
	strata <- as.numeric(strata.keep)
	nstrata <- max(strata)
	sigma <- object$scale[strata]
	}
    else {
	strata <- rep(1,n)
	nstrata <- 1
	sigma <- object$scale
	Terms2 <- Terms
	}

    if (need.x){
	x <- object$x
	if (is.null(x)) x <- model.matrix(Terms2, m)
	}
	
    if (need.y) {
	y <- object$y
	if (is.null(y)) y <- model.extract(m, 'response')
	status <- y[,ncol(y)]
	}
    #
    # Grab the distribution
    #
    if (is.character(object$dist)) 
	    dd <- survreg.distributions[[object$dist]]
    else dd <- object$dist
    if (is.null(dd$itrans)) {
	# no transform: use the identity for both directions
	itrans <- dtrans <-function(x)x
	}
    else {
	itrans <- dd$itrans
	dtrans <- dd$dtrans
	}
    # a distribution defined in terms of another one, e.g. weibull/extreme
    if (!is.null(dd$dist))  dd <- survreg.distributions[[dd$dist]]
    deviance <- dd$deviance
    dens <- dd$density

    nvar <- length(object$coef)
    if (rsigma) vv <- object$var
    else        vv <- object$var[1:nvar, 1:nvar]

    # Create the matrix of derivatives
    #  The "density" function returns F, 1-F, f, f'/f, and f''/f
    if (type != 'response') {
	# status codes: 0=right censored, 1=exact, 2=left censored,
	# 3=interval censored (uses the second time column via z2)
	status <- y[,ncol(y)]
	eta <- object$linear.predictor
	z <- (y[,1] - eta)/sigma
	dmat <- dens(z, object$parms)
	dtemp<- dmat[,3] * dmat[,4]    #f'
	if (any(status==3)) {
	    z2 <- (y[,2] - eta)/sigma
	    dmat2 <- dens(z2, object$parms)
	    }
	else {
	    dmat2 <- dmat   #dummy values
	    z2 <- 0
	    }
	deriv <- matrix(n,6)  # NOTE(review): never used -- 'deriv' is rebuilt by the cbind() below; looks like leftover preallocation
	# per-observation likelihood contribution, by censoring type
	tdenom <- ((status==0) * dmat[,2]) +
		  ((status==1) * 1 )       +
		  ((status==2) * dmat[,1]) +
		  ((status==3) * ifelse(z>0, dmat[,2]-dmat2[,2], 
		                             dmat2[,1] - dmat[,1]))
	g <- log(ifelse(status==1, dmat[,3]/sigma, tdenom))
	tdenom <- 1/(tdenom* sigma)
	# first and second derivatives of the loglik wrt eta
	dg <- -tdenom   *(((status==0) * (0-dmat[,3])) +
			  ((status==1) * dmat[,4]) + 
			  ((status==2) * dmat[,3]) +
			  ((status==3) * (dmat2[,3]- dmat[,3])))

	ddg <- (tdenom/sigma)*(((status==0) * (0- dtemp)) +
			       ((status==1) * dmat[,5]) +
			       ((status==2) * dtemp) +
			       ((status==3) * (dmat2[,3]*dmat2[,4] - dtemp))) 
	# derivatives wrt log(sigma), and the eta/sigma cross term
	td2 <- tdenom * sigma
	ds  <- ifelse(status<3, dg * sigma * z,
		                td2*(z2*dmat2[,3] - z*dmat[,3]))
	dds <- ifelse(status<3, ddg* (sigma*z)^2,
		                td2*(z2*z2*dmat2[,3]*dmat2[,4] -
				     z * z*dmat[,3] * dmat[,4]))
	dsg <- ifelse(status<3, ddg* sigma*z,
		      td2 *(z2*dmat2[,3]*dmat2[,4] - z*dtemp))
	deriv <- cbind(g, dg, ddg=ddg- dg^2, 
		       ds = ifelse(status==1, ds-1, ds), 
		       dds=dds - ds*(1+ds), 
		       dsg=dsg - dg*(1+ds))
	}

    #
    # Now, lay out the code one case at a time.
    #  There is some repetition this way, but otherwise the code gets
    #    too complicated.
    #
    if (type=='response') {
	# difference from the saturated-model center, on the response scale
	yhat0 <- deviance(y, object$scale[strata], object$parms)
	rr <-  itrans(yhat0$center) - itrans(object$linear.predictor)
	}

    else if (type=='deviance') {
	yhat0 <- deviance(y, object$scale[strata], object$parms)
	rr <- (-1)*deriv[,2]/deriv[,3]  #working residuals
	rr <- sign(rr)* sqrt(2*(yhat0$loglik - deriv[,1]))
	}
    
    else if (type=='dfbeta' || type== 'dfbetas') {
	score <- deriv[,2] * x  # score residuals
	if (rsigma) score <- cbind(score, deriv[,4])
	rr <-    score %*% vv
	if (type=='dfbetas') rr <- rr %*% diag(1/sqrt(diag(vv)))
	}

    else if (type=='working') rr <- (-1)*deriv[,2]/deriv[,3]

    else if (type=='ldcase'){
	# likelihood displacement for case deletion
	score <- deriv[,2] * x 
	if (rsigma) score <- cbind(score, deriv[,4])
	dfbeta<- score %*% vv
	rr <- apply(dfbeta*score,1,sum)
	}

    else if (type=='ldresp') {
	# likelihood displacement for a response perturbation
	rscore <-  deriv[,3] *  (x * sigma)
	if (rsigma) rscore <- cbind(rscore, deriv[,6] * sigma)
	temp <-  rscore %*% vv
	rr <- apply(rscore * temp, 1 , sum)
	}

    else if (type=='ldshape') {
	# likelihood displacement for a scale (shape) perturbation
	sscore <- deriv[,6] *x
	if (rsigma) sscore <- cbind(sscore,  deriv[,5]) 
	temp <- sscore %*% vv
	rr <- apply(sscore * temp, 1, sum)
	}

   else {  #type = matrix
	rr <- deriv
	}

    #
    # Multiply up by case weights, if requested
    #
    if (weighted) rr <- rr * weights

    #Expand out the missing values in the result
    if (!is.null(object$na.action)) {
	rr <- naresid(object$na.action, rr)
	if (is.matrix(rr)) n <- nrow(rr)
	else               n <- length(rr)
	}

    # Collapse if desired
    if (!missing(collapse)) {
	if (length(collapse) !=n) stop("Wrong length for 'collapse'")
	rr <- rowsum(rr, collapse)
	}

    rr
    }

	







# SCCS @(#)ridge.s	1.1 12/22/98
# Ridge (L2) penalty term for coxph/survreg model formulas.  The
# covariates are bound into one matrix of class 'coxph.penalty'; the
# attached attributes tell the fitting routine how to penalize the
# coefficients.
#   theta: fixed penalty parameter (give at most one of theta / df)
#   df:    target degrees of freedom when theta is to be solved for
#   scale: penalize coef^2 * var(x) instead of raw coef^2
ridge <- function(..., theta, df=nvar/2, eps=.1, scale=TRUE) {
    xmat <- cbind(...)
    nvar <- ncol(xmat)
    # one printable label per argument, e.g. "ridge(age)"
    xname <- as.character(parse(text=substitute(cbind(...))))[-1]
    # per-column variances, missing values excluded (used when scale=TRUE)
    colvar <- apply(xmat, 2, function(z) var(z[!is.na(z)]))
    class(xmat) <- 'coxph.penalty'

    if (!missing(theta) && !missing(df))
	    stop("Only one of df or theta can be specified")

    # Penalty value, gradient, and (diagonal) Hessian of the ridge penalty
    pfun <-
	if (scale) function(coef, theta, ndead, scale) {
	    list(penalty = sum(coef^2 * scale) * theta/2,
		 first   = theta * coef * scale,
		 second  = theta * scale,
		 flag    = FALSE)
	    }
	else function(coef, theta, ndead, scale) {
	    list(penalty = sum(coef^2) * theta/2,
		 first   = theta * coef,
		 second  = theta,
		 flag    = FALSE)
	    }

    if (!missing(theta)) {
	# fixed theta: the control function converges immediately
	extra <- list(pfun=pfun,
		      diag=TRUE,
		      cfun=function(parms, iter, history) {
				list(theta=parms$theta, done=TRUE) },
		      cparm=list(theta= theta),
		      pparm= colvar,
		      varname=paste('ridge(', xname, ')', sep=''))
	}
    else {
	# solve for the theta that gives the requested degrees of freedom
	extra <- list(pfun=pfun,
		      diag=TRUE,
		      cfun=frailty.controldf,
		      cargs = 'df',
		      cparm=list(df=df, eps=eps, thetas=0, dfs=nvar,
			  guess=1),
		      pparm= colvar,
		      varname=paste('ridge(', xname, ')', sep=''))
	}
	
    attributes(xmat) <- c(attributes(xmat), extra)
    xmat
    }
# SCCS @(#)strata.s	5.2 08/30/98
# Create a strata variable, possibly from many objects
#
# Each argument is coerced to a factor; the result is a single factor over
# the observed combinations, labeled "var=level, var2=level, ..." (or just
# the level names when shortlabel=TRUE).  With na.group=TRUE a missing
# value becomes a level of its own instead of propagating NA.
strata <- function(..., na.group=FALSE, shortlabel=FALSE) {
    # the argument expressions, used as the "var" part of the labels
    words <- as.character((match.call())[-1])
    if (!missing(na.group)) words <- words[-1]
    allf <- list(...)
    # a single list argument is treated as a list of stratification variables
    if(length(allf) == 1 && is.list(ttt <- unclass(allf[[1]]))) {
	    allf <- ttt
	    words <- names(ttt)
	    }
    nterms <- length(allf)
    what <- allf[[1]]
    if(is.null(levels(what)))
	    what <- factor(what)
    levs <- unclass(what) - 1   # 0-based level codes of the first variable
    wlab <- levels(what)
    if (na.group && any(is.na(what))){
	# make NA an explicit last level
	levs[is.na(levs)] <- length(wlab)
	wlab <- c(wlab, as.character(NA))
	}
    if (shortlabel)
        labs <- wlab
    else
        labs <- paste(words[1], wlab, sep='=')
    # fold in the remaining variables: levs is a mixed-radix encoding of
    # the combination of level codes, labs the matching label grid
    for (i in (1:nterms)[-1]) {
	what <- allf[[i]]
	if(is.null(levels(what)))
		what <- factor(what)
	wlab <- levels(what)
	wlev <- unclass(what) - 1
	if (na.group && any(is.na(wlev))){
	    wlev[is.na(wlev)] <- length(wlab)
	    wlab <- c(wlab, as.character(NA))
	    }
	if (!shortlabel) wlab <- format(paste(words[i], wlab, sep='='))
	levs <- wlev + levs*(length(wlab))
	labs <- paste(rep(labs, rep(length(wlab), length(labs))),
		      rep(wlab, length(labs)), sep=', ')
	}
    levs <- levs + 1
    # keep only the combinations that actually occur in the data
    ulevs <- sort(unique(levs[!is.na(levs)]))
    levs <- match(levs, ulevs)
    labs <- labs[ulevs]
    factor(levs, labels=labs)
    }
#SCCS @(#)summary.coxph.penal.s	1.2 01/14/99
#
# Summary method for a penalized Cox model (coxph with frailty, pspline,
#   ridge, ... terms).  All output is produced with cat/prmatrix and the
#   function returns NULL, invisibly.
# conf.int: confidence level for the hazard-ratio table (FALSE suppresses it)
# terms:    if TRUE, collapse multi-coefficient unpenalized terms into a
#             single chisq line each
# maxlabel: row labels are truncated to this many characters
#
summary.coxph.penal <-
 function(object, conf.int = 0.95, scale = 1, terms=FALSE, maxlabel=25,
			digits = max(options()$digits - 4, 3),...) {
    if (!is.null(object$call)) {
	cat("Call:\n")
	dput(object$call)
	cat("\n")
	}
    if (!is.null(object$fail)) {
	cat(" Coxreg failed.", object$fail, "\n")
	return()
	}
    # temporarily reduce the number of printed digits; restore on exit
    savedig <- options(digits = digits)
    on.exit(options(savedig))

    omit <- object$na.action
    if (length(omit))
	cat("  n=", object$n, " (", naprint(omit), ")\n", sep="")
    else cat("  n=", object$n, "\n")

    coef <- object$coef
    if (length(coef)==0 && length(object$frail)==0)
            stop("Penalized summary function can't be used for a null model")

    if (length(coef) > 0) {
	nacoef <- !(is.na(coef))          #non-missing coefs
	coef2 <- coef[nacoef]
	if(is.null(coef) | is.null(object$var))
		stop("Input is not valid")
	se <- sqrt(diag(object$var))
	}
    #
    # Map terms to special print functions, and the list of iteration histories
    #
    # print.map[i] = index into object$printfun for penalized term i, or 0
    #   when the term has no term-specific print function
    pterms <- object$pterms
    nterms <- length(pterms)
    npenal <- sum(pterms>0)
    print.map <- rep(0,nterms)
    if (!is.null(object$printfun)) {
	temp <- unlist(lapply(object$printfun, is.null))  #which ones are missing
	print.map[pterms>0] <- (1:npenal) * (!temp)
	}

    # Tedious, but build up the coef matrix a term at a time
    print1 <- NULL
    pname1 <- NULL
    if (is.null(object$assign2)) alist <- object$assign[-1]
    else alist <- object$assign2

    print2 <- NULL
    for (i in 1:nterms) {
	kk <- alist[[i]]   # coefficient positions for term i
	if (print.map[i] >0) {
	    # term has its own print function (e.g. frailty, pspline)
	    j <- print.map[i]	
	    if (pterms[i]==2) 
		 # sparse term: coefficients live in object$frail; the empty
		 #   third argument (no var2) looks deliberate -- TODO confirm
		 temp <- (object$printfun[[j]])(object$frail, object$fvar, ,
				  object$df[i], object$history[[j]])
	    else temp <- (object$printfun[[j]])(coef[kk], object$var[kk,kk], 
					   object$var2[kk,kk], 
					   object$df[i], object$history[[j]])
	    print1 <- rbind(print1, temp$coef)
	    if (is.matrix(temp$coef)) {
		xx <- dimnames(temp$coef)[[1]]
		if (is.null(xx))
			xx <- rep(names(pterms)[i], nrow(temp$coef))
		else    xx <- paste(names(pterms)[i], xx, sep=', ')
		pname1 <- c(pname1, xx)
		}
	    else  pname1 <- c(pname1, names(pterms)[i])
	    print2 <- c(print2, temp$history)
	    }

	else if (terms && length(kk)>1) {
	    # collapse a multi-coefficient term into a single Wald chisq line
	    pname1 <- c(pname1, names(pterms)[i])
	    temp <- coxph.wtest(object$var[kk,kk], coef[kk])$test
	    print1 <- rbind(print1, c(NA, NA, NA,
				      temp, object$df[i], 1-pchisq(temp, 1)))
	    }
	else {
	    # ordinary coefficients: one row per coefficient
	    pname1 <- c(pname1, names(coef)[kk])
	    tempe<- (diag(object$var))[kk]
	    temp <- coef[kk]^2/ tempe
	    print1 <- rbind(print1, cbind(coef[kk], sqrt(tempe),
				      sqrt((diag(object$var2))[kk]), 
				      temp, 1, 1-pchisq(temp, 1)))
	    }
	}

    # Format out the NA's 
    temp <- cbind(format(print1[,1]), format(print1[,2]), 
		       format(print1[,3]),
		       format(round(print1[,4], 2)),
		       format(round(print1[,5], 2)),
		       format(signif(print1[,6], 2)))
    temp <- ifelse(is.na(print1), "", temp)
    dimnames(temp) <- list(substring(pname1,1, maxlabel), 
			     c("coef","se(coef)", "se2", "Chisq","DF","p"))
    prmatrix(temp, quote=FALSE)

    # hazard ratios with confidence limits
    if(conf.int & length(coef) >0 ) {
        z <- qnorm((1 + conf.int)/2, 0, 1)
        coef <- coef * scale
        se <- se * scale
        tmp <- cbind(exp(coef), exp(-coef), exp(coef - z * se),
            exp(coef + z * se))
        dimnames(tmp) <- list(substring(names(coef),1, maxlabel), 
			      c("exp(coef)", "exp(-coef)",
            paste("lower .", round(100 * conf.int, 2), sep = ""),
            paste("upper .", round(100 * conf.int, 2), sep = "")))
        cat("\n")
        prmatrix(tmp)
        }
    logtest <- -2 * (object$loglik[1] - object$loglik[2])
    sctest <- object$score

    cat("\nIterations:", object$iter[1], "outer,", object$iter[2], 
        "Newton-Raphson\n")
    if (length(print2)) {
        for (i in 1:length(print2)) cat("    ", print2[i], "\n")
        }
    # total degrees of freedom used by the global tests below
    if (is.null(object$df)) df <- sum(!is.na(coef))
    else  df <- round(sum(object$df),2)
    cat("Degrees of freedom for terms=", format(round(object$df,1)), "\n")
    cat("Rsquare=", format(round(1-exp(-logtest/object$n),3)),
	"  (max possible=", format(round(1-exp(2*object$loglik[1]/object$n),3)),
	")\n" )
    cat("Likelihood ratio test= ", format(round(logtest, 2)), "  on ",
	df, " df,", "   p=", format(1 - pchisq(logtest, df)),
	"\n", sep = "")
    if (!is.null(object$wald.test))
        cat("Wald test            = ", format(round(object$wald.test, 2)), 
	    "  on ", df, " df,   p=",
	    format(1 - pchisq(object$wald.test, df)), sep = "")
    if (!is.null(object$score))
        cat("\nScore (logrank) test = ", format(round(sctest, 2)), "  on ", df,
            " df,", "   p=", format(1 - pchisq(sctest, df)), sep ="") 
    if (is.null(object$rscore)) cat("\n")
    else cat(",   Robust = ", format(round(object$rscore, 2)), 
	   "  p=", format(1 - pchisq(object$rscore, df)), "\n", sep="")   

    invisible()
    }
#SCCS 03/25/97 @(#)summary.coxph.s	4.8
#
# Summary method for an (unpenalized) Cox model.  Prints the call, the
#   coefficient table, optional hazard-ratio confidence intervals, and
#   the three global tests (likelihood ratio, Wald, score).
# Returns NULL, invisibly; the routine exists for its printed output.
#
summary.coxph <-
 function(object, table = TRUE, coef = TRUE, conf.int = 0.95, scale = 1,
			digits = max(options()$digits - 4, 3), ...)
    {
        cox<-object
    if (!is.null(cl<- cox$call)) {
	cat("Call:\n")
	dput(cl)
	cat("\n")
	}
    if (!is.null(cox$fail)) {
	cat(" Coxreg failed.", cox$fail, "\n")
	return()
	}
    # temporarily reduce the number of printed digits; restore on exit
    savedig <- options(digits = digits)
    on.exit(options(savedig))

    omit <- cox$na.action
    if (length(omit))
	cat("  n=", cox$n, " (", naprint(omit), ")\n", sep="")
    else cat("  n=", cox$n, "\n")
    if (length(cox$icc))
	cat("  robust variance based on", cox$icc[1],
	    "groups, intra-class correlation =", format(cox$icc[2:3]), "\n")
    if (is.null(cox$coef)) {   # Null model
	cat ("   Null model\n")
	return()
	}

    beta <- cox$coef
    nabeta <- !(is.na(beta))          #non-missing coefs
    beta2 <- beta[nabeta]
    if(is.null(beta) | is.null(cox$var))
        stop("Input is not valid")
    se <- sqrt(diag(cox$var))
    # when naive.var is present, cox$var holds the robust variance and
    #   naive.var the model-based one
    if (!is.null(cox$naive.var)) nse <- sqrt(diag(cox$naive.var))

    if(coef) {
	if (is.null(cox$naive.var)) {
	    tmp <- cbind(beta, exp(beta), se, beta/se,
		   signif(1 - pchisq((beta/ se)^2, 1), digits -1))
	    dimnames(tmp) <- list(names(beta), c("coef", "exp(coef)",
		"se(coef)", "z", "p"))
	    }
	else {
	    # show both the naive and the robust standard error
	    tmp <- cbind(beta, exp(beta), nse, se, beta/se,
		   signif(1 - pchisq((beta/ se)^2, 1), digits -1))
	    dimnames(tmp) <- list(names(beta), c("coef", "exp(coef)",
		"se(coef)", "robust se", "z", "p"))
	    }
        cat("\n")
        prmatrix(tmp)
        }
    if(conf.int) {
        # hazard ratios with (conf.int)-level confidence limits
        z <- qnorm((1 + conf.int)/2, 0, 1)
        beta <- beta * scale
        se <- se * scale
        tmp <- cbind(exp(beta), exp(-beta), exp(beta - z * se),
            exp(beta + z * se))
        dimnames(tmp) <- list(names(beta), c("exp(coef)", "exp(-coef)",
            paste("lower .", round(100 * conf.int, 2), sep = ""),
            paste("upper .", round(100 * conf.int, 2), sep = "")))
        cat("\n")
        prmatrix(tmp)
        }
    # global tests, all on df = number of estimated coefficients
    logtest <- -2 * (cox$loglik[1] - cox$loglik[2])
    sctest <- cox$score
    df <- length(beta2)
    cat("\n")
    cat("Rsquare=", format(round(1-exp(-logtest/cox$n),3)),
	"  (max possible=", format(round(1-exp(2*cox$loglik[1]/cox$n),3)),
	")\n" )
    cat("Likelihood ratio test= ", format(round(logtest, 2)), "  on ",
	df, " df,", "   p=", format(1 - pchisq(logtest, df)),
	"\n", sep = "")
    cat("Wald test            = ", format(round(cox$wald.test, 2)), "  on ",
	df, " df,", "   p=", format(1 - pchisq(cox$wald.test, df)),
	"\n", sep = "")
    cat("Score (logrank) test = ", format(round(sctest, 2)), "  on ", df,
        " df,", "   p=", format(1 - pchisq(sctest, df)), sep ="") 
    if (is.null(cox$rscore)) cat("\n\n")
    else cat(",   Robust = ", format(round(cox$rscore, 2)), 
	   "  p=", format(1 - pchisq(cox$rscore, df)), "\n\n", sep="")   

    if (!is.null(cox$naive.var))
	cat("  (Note: the likelihood ratio and score tests",
	  "assume independence of\n     observations within a cluster,",
	    "the Wald and robust score tests do not).\n")
    invisible()
    }
# SCCS @(#)summary.ratetable.s	1.1 11/03/98
#
# Print out information about a rate table: it's dimensions and keywords
#
# Describe a rate table, one line per dimension: categorical dimensions
#   (factor==1) list their levels; continuous dimensions (factor==0) show
#   their range and number of categories; factor>1 marks a continuous
#   dimension that is linearly interpolated (e.g. US yearly tables).
# Returns the table's attribute list, invisibly.
summary.ratetable <- function(object, ...) {
    rtable <- object
    if (!inherits(rtable, 'ratetable')) stop("Argument is not a rate table")

    att <- attributes(rtable)
    ndim <- length(dim(rtable))
    cat (" Rate table with", ndim, "dimensions:\n")
    for (i in seq_len(ndim)) {
	fac <- att$factor[i]
	if (fac == 1) {
	    # categorical dimension: list its levels
	    cat("\t", att$dimid[i], " has levels of: ",
		paste(att$dimnames[[i]], collapse=' '), "\n", sep='')
	    }
	else {
	    rng <- range(att$cutpoints[[i]])
	    if (fac == 0) {
		# plain continuous dimension
		cat("\t", att$dimid[i], " ranges from ",
		    format(rng[1]), " to ", format(rng[2]), "; with ",
		    att$dim[i], " categories\n", sep='')
		}
	    else {
		# continuous dimension with linear interpolation
		cat("\t", att$dimid[i], " ranges from ", format(rng[1]),
		    " to ", format(rng[2]), "; with ", att$dim[i],
		    " categories,\n\t\tlinearly interpolated in ",
		    fac, " steps per division\n", sep='')
		}
	    }
	}
    invisible(att)
    }

# SCCS @(#)summary.survfit.s	5.1 08/30/98
#
# Summary method for survfit objects: extract the curve(s) at the event
#   times (the default), at every time point (censored=TRUE), or at a
#   user-supplied vector of times.  Returns a 'summary.survfit' list.
#
summary.survfit <- function(object, times, censored=FALSE, scale=1, ...) {
    fit<-object
    if (!inherits(fit, 'survfit'))
	    stop("Invalid data")
    missing.times<-missing(times)
    n <- length(fit$time)
    stime <- fit$time/scale
    # stemp[i] = stratum number of the i-th time point on the curve
    if (is.null(fit$strata)) {
	stemp <- rep(1,n)
	nstrat <- 1
	}
    else {
	nstrat <- length(fit$strata)
	stemp <- rep(1:nstrat,fit$strata)
	}

    surv <- as.matrix(fit$surv)
    # fit$std.err appears to be stored on a relative scale; multiplying by
    #   surv puts it on the survival scale -- TODO confirm against survfit
    if (is.null(fit$std.err)) std.err <- NULL
    else                      std.err <- fit$std.err * surv

    if (!is.null(fit$lower)) {
	lower <- as.matrix(fit$lower)
	upper <- as.matrix(fit$upper)
	}

    if (missing.times) {
	if (censored) {
	    times <- stime
	    n.risk<- fit$n.risk
	    n.event <- fit$n.event
	    }
	else {
	    # keep only the rows at which an event occurred
	    who    <- (fit$n.event >0)
	    times  <-  stime[who]
	    n.risk <-  fit$n.risk[who]
	    n.event <- fit$n.event[who]
	    stemp <- stemp[who]
	    surv <- surv[who,,drop=FALSE]
	    if (!is.null(std.err)) std.err <- std.err[who,,drop=FALSE]
	    if (!is.null(fit$lower)) {
		lower <- lower[who,,drop=FALSE]
		upper <- upper[who,,drop=FALSE]
		}
	    }
	}

    else {  #this case is much harder
	if (any(times<0)) stop("Invalid time point requested")
        if(max(fit$time) < min(times))
            stop("Requested times are all beyond the end of the survival curve")
	if (length(times) >1 )
	    if (any(diff(times)<0)) stop("Times must be in increasing order")

	# survindex2 maps each requested time to a row of the curve, per
	#   stratum; indx2 seems to flag times before the first observation
	#   (1) or tied with a data time (2) -- TODO confirm vs survindex2.c
	temp <- .C("survindex2", as.integer(n),
				  as.double(stime),
				  as.integer(stemp),
				  as.integer(length(times)),
				  as.double(times),
				  as.integer(nstrat),
				  indx = integer(nstrat*length(times)),
				  indx2= integer(nstrat*length(times)),
                   PACKAGE="survival")
	keep <- temp$indx >=0
	indx <- temp$indx[keep]
	ones <- (temp$indx2==1)[keep]
	ties <- (temp$indx2==2)[keep]  #data set time === requested time

	times <- rep(times, nstrat)[keep]
	n.risk <- fit$n.risk[indx+1 - (ties+ones)]
	surv   <- surv[indx,,drop=FALSE];   surv[ones,] <- 1
	if (!is.null(std.err)) {
	    std.err<- std.err[indx,,drop=FALSE]
	    std.err[ones,] <-0
	    }
	# number of events in the interval ending at each requested time
	fit$n.event[stime>max(times)] <- 0
	n.event <- (cumsum(c(0,fit$n.event)))[ifelse(ones, indx, indx+1)]
	n.event<-  diff(c(0, n.event))

	if (!is.null(fit$lower)) {
	    lower <- lower[indx,,drop=FALSE];  lower[ones,] <- 1;
	    upper <- upper[indx,,drop=FALSE];  upper[ones,] <- 1;
	    }

	stemp <- stemp[indx]
	}

    # assemble the result; matrices collapse to vectors for a single curve
    ncurve <- ncol(surv)
    temp <- list(surv=surv, time=times, n.risk=n.risk, n.event=n.event,
			conf.int=fit$conf.int)
    if (ncurve==1) {
	temp$surv <- drop(temp$surv)
	if (!is.null(std.err)) temp$std.err <- drop(std.err)
	if (!is.null(fit$lower)) {
	    temp$lower <- drop(lower)
	    temp$upper <- drop(upper)
	    }
	}
    else {
	if (!is.null(std.err)) temp$std.err <- std.err
	if (!is.null(fit$lower)) {
	    temp$lower <- lower
	    temp$upper <- upper
	    }
	}
    if (!is.null(fit$strata))
	temp$strata <- factor(stemp,
	    labels = names(fit$strata)[sort(unique(stemp))])
    temp$call <- fit$call
    if (!is.null(fit$na.action)) temp$na.action <- fit$na.action
    class(temp) <- 'summary.survfit'
    temp
    }
# SCCS @(#)summary.survreg.s	4.15 02/11/99
# Summary method for survreg fits.
#
# Builds the coefficient table (Value, Std. Error, z = Value/SE, two-sided
# normal p-value), optionally the correlation matrix of the estimates, and
# packages selected components of the fit into an object of class
# 'summary.survreg' for the corresponding print method.
#
# Arguments:
#   object      a survreg fit
#   correlation if TRUE, also return the correlation matrix of the
#                 coefficient estimates
#   ...         unused; present for generic/method consistency
summary.survreg<- function(object, correlation = FALSE,...)
{
    if (!is.null(object$fail)) {
	# Bug fix: this formerly referenced 'x$fail', but 'x' is not created
	#   until the end of the function, so the failure path itself errored.
	warning(" Survreg failed.", object$fail, "   No summary provided\n")
	return(invisible(object))
	}
    wt <- object$weights
    
    # If the variance matrix is larger than the coefficient vector, the
    #   extra row(s) correspond to the estimated log(scale) parameter(s).
    nvar0 <- length(object$coef)
    nvar <- nrow(object$var)
    if (nvar > nvar0) {
	coef <- c(object$coef, log(object$scale))
	if ( (nvar-nvar0)==1) cname <- c(names(object$coef), "Log(scale)")
	else cname <- c(names(object$coef), names(object$scale))
	}
    else {
	coef <- object$coef
	cname <- names(object$coef)
	}

    n <- length(object$linear.predictors)
    p <- sum(!is.na(coef))        # number of estimated coefficients
    if(!p) {
        warning("This model has zero rank --- no summary is provided")
        return(invisible(object))
        }

    nsingular <- nvar - p
    # coefficient table: Value, Std. Error, z, two-sided p
    table <- matrix(rep(coef, 4), ncol = 4)
    dimnames(table) <- list(cname, c("Value", "Std. Error", "z", "p"))
    stds <- sqrt(diag(object$var))
    table[, 2] <- stds
    table[, 3] <- table[, 1]/stds
    table[, 4] <- 2*pnorm(-abs(table[,3]))
    if(correlation) {
	nas <- is.na(coef)
	stds <- stds[!nas]
	correl <- diag(1/stds) %*% object$var[!nas, !nas] %*% diag(1/stds)
        dimnames(correl) <- list(cname, cname)
        }
    else correl <- NULL

    # the distribution may be given by name or as a full description list
    dist <- object$dist
    if (is.character(dist)) sd <- survreg.distributions[[dist]]
    else sd <- dist

    # (typo fix: was "parmameters")
    if (length(object$parms)) 
	    pprint<- paste(sd$name, 'distribution: parameters=', object$parms)
    else    pprint<- paste(sd$name, 'distribution')

    # carry forward the components that the print method needs
    x <- object[match(c('call', 'df', 'loglik', 'iter', 'na.action', 'idf',
			'scale', 'coefficients', 'var'), 
		      names(object), nomatch=0)]
    x <- c(x, list(table=table, correlation=correl, parms=pprint,
		   n=n, chi=2*diff(object$loglik)))

    class(x) <- 'summary.survreg'
    x
    }


# Split survival records at the given cut points, producing one row per
# (subject, interval) in counting-process (start, stop] form.
#   data:    a data frame of survival records
#   cut:     vector of times at which to split
#   end:     name of the event-time column in 'data'
#   event:   name of the event-status column in 'data'
#   start:   name for the interval-start column (reused if already present)
#   id:      optional name for a subject-identifier column (row names)
#   zero:    start time used when 'start' is not already in the data
#   episode: optional name for an interval-number column (0, 1, ...)
survSplit <- function(data, cut, end, event, start, id=NULL,
                      zero=0, episode=NULL) {

  cut <- sort(cut)
  ncut <- length(cut)
  nobs <- nrow(data)

  # replicate every column once per interval: ncut+1 block-wise copies
  expanded <- lapply(data, rep, ncut + 1)

  # interval endpoints: each cut point in turn, then Inf for the last block
  stoptime <- rep(c(cut, Inf), each = nobs)

  eventtime <- expanded[[end]]

  # interval starts: an existing start column (or 'zero') for the first
  #   block, then the cut points for the later blocks
  if (start %in% names(data)) first <- data[[start]]
  else                        first <- rep(zero, length = nobs)
  begintime <- c(first, rep(cut, each = nobs))

  interval <- rep(0:ncut, each = nobs)

  # the event is recorded only in the interval that contains it
  status <- ifelse(eventtime <= stoptime & eventtime > begintime,
                   expanded[[event]], 0)
  stoptime <- pmin(stoptime, eventtime)

  # intervals of zero (or negative) length are discarded at the end
  empty <- begintime >= stoptime

  result <- do.call("data.frame", expanded)
  result[, start] <- begintime
  result[, end] <- stoptime
  result[, event] <- status
  if (!is.null(id))
    result[, id] <- rep(rownames(data), ncut + 1)
  if (!is.null(episode))
    result[, episode] <- interval

  result[!empty, ]
}
#SCCS @(#)survdiff.fit.s	1.1 01/07/96
#
# Observed/expected counts and variance matrix for the k-sample test;
#   the actual accumulation is done by the C routine 'survdiff2'.
# y:     2-column right-censored Surv matrix (time, status)
# x:     group indicator (k groups)
# strat: optional stratification variable
# rho:   weight parameter of the test family (0 gives the log-rank test)
#
survdiff.fit <- function(y, x, strat, rho=0) {
    #
    # This routine is almost always called from survdiff
    #  If called directly, remember that it does no error checking
    #
    n <- length(x)
    if (ncol(y) !=2) stop ("Invalid y matrix")
    if (nrow(y) !=n | length(x) !=n) stop("Data length mismatch")

    # recode the groups as consecutive integers 1..ngroup
    ngroup <- length(unique(x))
    if (ngroup <2) stop ("There is only 1 group")
    if (is.category(x)) x <- as.numeric(x)
    else x <- match(x, unique(x))

    if (missing(strat)) strat <- rep(1,n)
    else strat <- as.numeric(as.factor(strat))
    nstrat <- length(unique(strat))
    if (length(strat) !=n) stop("Data length mismatch")

    # sort within stratum by time, deaths before censorings at ties;
    #   strat2 marks the last observation of each stratum
    ord <- order(strat, y[,1], -y[,2])
    strat2 <- c(1*(diff(strat[ord])!=0), 1)

    xx <- .C("survdiff2", as.integer(n),
		   as.integer(ngroup),
		   as.integer(nstrat),
		   as.double(rho),
		   as.double(y[ord,1]),
		   as.integer(y[ord,2]),
		   as.integer(x[ord]),
		   as.integer(strat2),
		   observed = double(ngroup*nstrat),
		   expected = double(ngroup*nstrat),
		   var.e    = double(ngroup * ngroup),
		   double(ngroup), double(n),
             PACKAGE="survival")

    # NOTE(review): 'xx$var' below relies on partial matching against the
    #   list element named 'var.e' -- fragile, but it does resolve correctly
    if (nstrat==1)  list(expected = xx$expected,
			 observed = xx$observed,
			 var      = matrix(xx$var, ngroup, ngroup))
    else            list(expected = matrix(xx$expected, ngroup),
			 observed = matrix(xx$observed, ngroup),
			 var      = matrix(xx$var, ngroup, ngroup))
    }
#SCCS 08/30/98 @(#)survdiff.s	5.1
#
# K-sample test for differences between survival curves (rho selects a
#   member of the weighted log-rank family; rho=0 is the log-rank test).
# If the formula contains an offset term, a one-sample test of observed
#   versus expected survival is done instead.
# Returns an object of class 'survdiff'.
#
survdiff <- function(formula, data, subset, na.action, rho=0) {
    call <- match.call()
    # rebuild our call as a model.frame call, dropping 'rho'
    m <- match.call(expand=FALSE)
    m$rho <- NULL

    Terms <- if(missing(data)) terms(formula, 'strata')
	     else              terms(formula, 'strata', data=data)
    m$formula <- Terms
    m[[1]] <- as.name("model.frame")
    m <- eval(m, parent.frame())
    y <- model.extract(m, response)
    if (!inherits(y, "Surv")) stop("Response must be a survival object")
    if (attr(y, 'type') != 'right') stop("Right censored data only")
    ny <- ncol(y)
    n <- nrow(y)

    offset<- attr(Terms, "offset")
    if (!is.null(offset)) {
	#one sample test
	# the offset gives each subject's expected survival probability
	offset <- as.numeric(m[[offset]])
	if (length(attr(Terms,"factors"))>0) stop("Cannot have both an offset and groups")
	if (any(offset <0 | offset >1))
	    stop("The offset must be a survival probability")
	expected <- sum(1-offset)
	observed <- sum(y[,ny])
	if (rho!=0) {
	    num <- sum(1/rho - ((1/rho + y[,ny])*offset^rho))
	    var <- sum(1- offset^(2*rho))/(2*rho)
	    }
	else {
	    # rho=0 limit of the expressions above
	    var <-  sum(-log(offset))
	    num <-  var - observed
	    }
	chi <- num*num/var
	rval <-list(n= n, obs = observed, exp=expected, var=var,
			chisq= chi)
	}

    else { #k sample test
	strats <- attr(Terms, "specials")$strata
	if (length(strats)) {
	    temp <- untangle.specials(Terms, 'strata', 1)
	    dropx <- temp$terms
	    if (length(temp$vars)==1) strata.keep <- m[[temp$vars]]
	    else strata.keep <- strata(m[,temp$vars], shortlabel=TRUE)
	    }
	else strata.keep <- rep(1,nrow(m))

	#Now create the group variable
	if (length(strats)) ll <- attr(Terms[-dropx], 'term.labels')
	else                ll <- attr(Terms, 'term.labels')
	if (length(ll) == 0) stop("No groups to test")
	else groups <- strata(m[ll])

	fit <- survdiff.fit(y, groups, strata.keep, rho)
	if (is.matrix(fit$observed)){
	    # stratified: sum observed/expected over the strata
	    otmp <- apply(fit$observed,1,sum)
	    etmp <- apply(fit$expected,1,sum)
	    }
	else {
	    otmp <- fit$observed
	    etmp <- fit$expected
	    }
	df   <- (etmp >0)                #remove groups with exp=0
	if (sum(df) <2) chi <- 0         # No test, actually
	else {
	    # quadratic form on the (k-1)-dimensional contrast, dropping
	    #   one redundant group
	    temp2 <- ((otmp - etmp)[df])[-1]
	    vv <- (fit$var[df,df])[-1,-1, drop=FALSE]
	    chi <- sum(solve(vv, temp2) * temp2)
	    }

	rval <-list(n= table(groups), obs = fit$observed,
		    exp = fit$expected, var=fit$var,  chisq=chi)
	if (length(strats)) rval$strata <- table(strata.keep)
	}

    na.action <- attr(m, "na.action")
    if (length(na.action)) rval$na.action <- na.action
    rval$call <- call
    class(rval) <- 'survdiff'
    rval
    }
#  SCCS @(#)survexp.cfit.s	5.1 08/30/98
#
#  Do expected survival based on a Cox model
#   A fair bit of the setup work is identical to survfit.coxph, i.e.,
#     to reconstruct the data frame
#
#  The execution path for individual survival is completely separate, and
#    a whole lot simpler.
#
survexp.cfit <- function(x, y, death, individual, cox, se.fit, method) {
    # x:   matrix; column 1 is a group code, the remaining columns are the
    #        covariates of the Cox model
    # y:   follow-up time(s) for the new subjects
    # death, individual, se.fit: logical flags passed down from survexp
    # cox: the fitted coxph object
    if (!is.matrix(x)) stop("x must be a matrix")

    #
    # If it is individual survival, things are fairly easy
    #    (the parent routine has guarranteed NO strata in the Cox model
    #
    if (individual) {
	fit <- survfit.coxph(cox, se.fit=FALSE)
	# per-subject risk score, centered at the covariate means of the fit
	risk <- x[,-1,drop=FALSE] %*% cox$coef  -  sum(cox$coef *cox$means)
	nt <- length(fit$time)
	# step-function lookup of the baseline curve at each requested time
	surv <- approx(-c(0,fit$time), c(1,fit$surv), -y,
				method='constant', rule=2, f=1)$y
	return(list(times=y, surv=c(surv^(exp(risk)))))
	}

    # Otherwise, get on with the real work
    temp <- coxph.getdata(cox, y=TRUE, x=se.fit, strata=FALSE)
    cy <- temp$y
    cx <- temp$x
    cn <- nrow(cy)
    nvar <- length(cox$coef)

    if (ncol(x) != (1+ nvar))
	stop("x matrix does not match the cox fit")

    ngrp <- max(x[,1])
    if (!is.logical(death)) stop("Invalid value for death indicator")

    # encode (ties method, death/conditional) as one integer for the C code
    if (missing(method))
	method <- (1 + 1*(cox$method=='breslow') +2*(cox$method=='efron')
		     + 10*(death))
    else stop("Program unfinished")

    #
    # Data appears ok so proceed
    #  First sort the old data set
    # Also, expand y to (start, stop] form.  This leads to slower processing,
    #  but I only have to program one case instead of 2.
    if (ncol(cy) ==2) {
  	mintime <- min(cy[,1])
 	if (mintime < 0) cy <- cbind(2*mintime-1, cy)
 	else	       cy <- cbind(-1, cy)
 	}
    ord <- order(cy[,2], -cy[,3])
    cy  <- cy[ord,]
    score <- exp(cox$linear.predictors[ord])
    if (se.fit) cx <- cx[ord,]
    else  cx <- 0   #dummy, for .C call


    #
    # Process the new data
    #
    if (missing(y) || is.null(y)) y <- rep(max(cy[,2]), nrow(x))
    ord <- order(x[,1])
    x[,1] <- x[,1] - min(x[,1])
    n <- nrow(x)
    ncurve <- length(unique(x[,1]))
    npt <- length(unique(cy[cy[,3]==1,2]))  #unique death times
    storage.mode(cy) <- 'double'
    xxx  <- .C('agsurv3', as.integer(n),
			  as.integer(nvar),
			  as.integer(ncurve),
			  as.integer(npt),
			  as.integer(se.fit),
			  as.double(score),
			  y = as.double(y[ord]),
			  x[ord,],
			  cox$coef,
			  cox$var,
			  cox$means,
			  as.integer(cn),
			  cy = cy,
			  as.double(cx),
			  surv = matrix(0.0, npt, ncurve),
			  varhaz = matrix(0.0, npt, ncurve),
			  nrisk  = matrix(0.0, npt, ncurve),
			  as.integer(method), PACKAGE="survival")

    # the C routine returns per-interval terms; cumulate into curves
    surv <- apply(xxx$surv, 2, cumprod)
    if (se.fit)
	list(surv=surv, n=xxx$nrisk, times=xxx$cy[1:npt],
			se=sqrt(xxx$varhaz)/surv)
    else
	list(surv=surv, n=xxx$nrisk, times=xxx$cy[1:npt,1] )
    }
#  SCCS @(#)survexp.fit.s	5.2 11/03/98
#  Actually compute the expected survival for one or more cohorts
#    of subjects.  If each subject is his/her own group, it gives individual
#    survival
survexp.fit <- function(x, y, times, death, ratetable) {
    # x:     matrix; column 1 is the group number, the remaining columns
    #          index the dimensions of the rate table
    # y:     follow-up time for each subject
    # times: time points at which the expected survival is wanted
    # death: logical, selects between the two computational methods
    #          (passed from survexp's 'conditional' handling)
    if (!is.matrix(x)) stop("x must be a matrix")
    if (ncol(x) != (1+length(dim(ratetable))))
	stop("x matrix does not match the rate table")
    atts <- attributes(ratetable)
    rfac <- atts$factor
    if (length(rfac) != ncol(x)-1) stop("Wrong length for rfac")
    ngrp <- max(x[,1])
    times <- sort(unique(times))
    if (any(times <0)) stop("Negative time point requested")
    if (missing(y))  y <- rep(max(times), nrow(x))
    ntime <- length(times)
    if (!is.logical(death)) stop("Invalid value for death indicator")

    cuts <- atts$cutpoints
    # a dimension with factor > 1 is a US-style calendar-year table whose
    #   annual rates must be interpolated
    us.special <- (rfac >1)
    if (any(us.special)) {  #special handling for US pop tables
	if (sum(us.special) >1)
	    stop("Two columns marked for special handling as a US rate table")
	#slide entry date so that it appears that they were born on Jan 1
	cols <- 1+match(c("age", "year"), attr(ratetable, "dimid"))
	if (any(is.na(cols))) stop("Ratetable does not have expected shape")
	temp <- date.mdy(as.date(x[,cols[2]]) -x[,cols[1]])
	x[,cols[2]] <- x[,cols[2]] - mdy.date(temp$month, temp$day, 1960)
	# Doctor up "cutpoints"
	temp <- (1:length(rfac))[us.special]
	nyear <- length(cuts[[temp]])
	nint <- rfac[temp]       #intervals to interpolate over
	cuts[[temp]] <- round(approx(nint*(1:nyear), cuts[[temp]],
					nint:(nint*nyear))$y - .0001)
	}
    # the C routine does the actual accumulation over the rate table
    temp <- .C('pyears3',
		    as.integer(death),
		    as.integer(nrow(x)),
		    as.integer(length(atts$dim)),
		    as.integer(rfac),
		    as.integer(atts$dim),
		    as.double(unlist(cuts)),
		    ratetable,
		    as.double(x),
		    as.double(y),
		    as.integer(ntime),
		    as.integer(ngrp),
		    as.double(times),
		    surv = double(ntime * ngrp),
		    n   = integer(ntime *ngrp),
               PACKAGE="survival")
    # cumulate the per-interval survival into curves, one per group
    if (ntime==1) list(surv=temp$surv, n=temp$n)
    else if (ngrp >1)
	 list(surv=apply(matrix(temp$surv, ntime, ngrp),2,cumprod),
		 n=   matrix(temp$n, ntime, ngrp))
    else list(surv=cumprod(temp$surv), n=temp$n)
    }
# SCCS @(#)survexp.s	5.2 02/19/99

#
# Expected survival for a cohort of subjects, based either on a population
#   rate table (e.g. survexp.us) or on a previously fitted Cox model.
# With cohort=FALSE, returns one expected-survival probability per subject
#   instead of curves.  The cohort case returns an object of class
#   c('survexp', 'survfit').
#
survexp <- function(formula=formula(data), data=parent.frame(),
	weights, subset, na.action,
	times,  cohort=TRUE,  conditional=FALSE,
	ratetable=survexp.us, scale=1, npoints, se.fit,
	model=FALSE, x=FALSE, y=FALSE) {

    call <- match.call()
    # rebuild our call as a model.frame call, dropping the arguments that
    #   model.frame does not know about
    m <- match.call(expand=FALSE)
    m$ratetable <- m$model <- m$x <- m$y <- m$scale<- m$cohort <- NULL
    m$times <- m$conditional <- m$npoints <- m$se.fit <- NULL

    Terms <- if(missing(data)) terms(formula, 'ratetable')
	     else              terms(formula, 'ratetable',data=data)

    rate <- attr(Terms, "specials")$ratetable
    if(length(rate) > 1)
	    stop("Can have only 1 ratetable() call in a formula")
    if(length(rate) == 0) {
	# add a 'ratetable' call to the internal formula
        # The dummy function stops an annoying warning message "Looking for
        #  'formula' of mode function, ignored one of mode ..."
	xx <- function(x) formula(x)
    
	if(is.ratetable(ratetable))   varlist <- attr(ratetable, "dimid")
	else if(inherits(ratetable, "coxph")) {
	    varlist <- names(ratetable$coef)
	    # Now remove "log" and such things, using terms.inner
	    temp <- terms.inner(xx(paste("~", paste(varlist, collapse='+'))))
	    varlist <- attr(temp, 'term.labels')
            }
	else stop("Invalid rate table")

	ftemp <- deparse(substitute(formula))
	formula <- xx( paste( ftemp, "+ ratetable(",
			  paste( varlist, "=", varlist, collapse = ","), ")"))
	Terms <- if (missing(data)) terms(formula, "ratetable")
	         else               terms(formula, "ratetable", data = data)
	rate <- attr(Terms, "specials")$ratetable
	}

    m$formula <- Terms
    m[[1]] <- as.name("model.frame")
    m <- eval(m, parent.frame())
    n <- nrow(m)

    if (any(attr(Terms, 'order') >1))
	    stop("Survexp cannot have interaction terms")
    if (!missing(times)) {
	if (any(times<0)) stop("Invalid time point requested")
	if (length(times) >1 )
	    if (any(diff(times)<0)) stop("Times must be in increasing order")
	}

    # the response, if any: a right-censored Surv (only the time is used)
    #   or a plain vector of follow-up times
    Y <- model.extract(m, 'response')
    no.Y <- is.null(Y)
    if (!no.Y) {
	if (is.matrix(Y)) {
	    if (is.Surv(Y) && attr(Y, 'type')=='right') Y <- Y[,1]
	    else stop("Illegal response value")
	    }
	if (any(Y<0)) stop ("Negative follow up time")
	if (missing(npoints)) temp <- unique(Y)
	else                  temp <- seq(min(Y), max(Y), length=npoints)
	if (missing(times)) newtime <- sort(temp)
	else  newtime <- sort(unique(c(times, temp[temp<max(times)])))
	}
    else conditional <- FALSE
    weights <- model.extract(m, 'weights')
    if (!is.null(weights)) warning("Weights ignored")

    # grouping variables = all terms except the ratetable() term
    if (no.Y) ovars <- attr(Terms, 'term.labels')[-rate]
    else      ovars <- attr(Terms, 'term.labels')[-(rate-1)]
	
    if (is.ratetable(ratetable)) {
	israte <- TRUE
	if (no.Y) {
	    if (missing(times))
	       stop("There is no times argument, and no follow-up times are given in the formula")
	    else newtime <- sort(unique(times))
	    Y <- rep(max(times), n)
	    }
	se.fit <- FALSE
	rtemp <- match.ratetable(m[,rate], ratetable)
	R <- rtemp$R
	if (!is.null(rtemp$call)) {  #need to dop some dimensions from ratetable
	    ratetable <- eval(parse(text=rtemp$call))
	    }
       }
    else if (inherits(ratetable, 'coxph')) {
	# expected survival taken from a fitted Cox model instead
	israte <- FALSE
	Terms <- ratetable$terms
	if (!inherits(Terms, 'terms'))
		stop("invalid terms component of fit")
	if (!is.null(attr(Terms, 'offset')))
	    stop("Cannot deal with models that contain an offset")
	m2 <- data.frame(unclass(m[,rate]))
	strats <- attr(Terms, "specials")$strata
	if (length(strats))
	    stop("survexp cannot handle stratified Cox models")
	R <- model.matrix(delete.response(Terms), m2)[,-1,drop=FALSE]
	if (any(dimnames(R)[[2]] != names(ratetable$coef)))
	    stop("Unable to match new data to old formula")
	if (no.Y) {
	    if (missing(se.fit)) se.fit <- TRUE
	    }
	else se.fit <- FALSE
	}
    else stop("Invalid ratetable argument")

    if (cohort) {
	# Now process the other (non-ratetable) variables
	if (length(ovars)==0)  X <- rep(1,n)  #no categories
	else {
	    odim <- length(ovars)
	    for (i in 1:odim) {
		temp <- m[[ovars[i]]]
		ctemp <- class(temp)
		if (!is.null(ctemp) && ctemp=='tcut')
		    stop("Can't use tcut variables in expected survival")
		}
	    X <- strata(m[ovars])
	    }

	#do the work
	if (israte)
	    temp <- survexp.fit(cbind(as.numeric(X),R), Y, newtime,
			       conditional, ratetable)
	else {
	    temp <- survexp.cfit(cbind(as.numeric(X),R), Y, conditional, FALSE,
			       ratetable, se.fit=se.fit)
	    newtime <- temp$times
	    }
	#package the results
	if (missing(times)) {
	    n.risk <- temp$n
	    surv <- temp$surv
	    if (se.fit) err <- temp$se
	    }
	else {
	    # subset the computed curves down to the requested time points
	    if (israte) keep <- match(times, newtime)
	    else {
		# taken straight out of summary.survfit....
		n <- length(temp$times)
		temp2 <- .C("survindex2", as.integer(n),
					  as.double(temp$times),
					  as.integer(rep(1,n)),
					  as.integer(length(times)),
					  as.double(times),
					  as.integer(1),
					  indx = integer(length(times)),
					  indx2= integer(length(times)),
                            PACKAGE="survival")
		keep <- temp2$indx[temp2$indx>0]
		}

	    if (is.matrix(temp$surv)) {
		surv <- temp$surv[keep,,drop=FALSE]
		n.risk <- temp$n[keep,,drop=FALSE]
		if (se.fit) err <- temp$se[keep,,drop=FALSE]
		}
	    else {
		surv <- temp$surv[keep]
		n.risk <- temp$n[keep]
		if (se.fit) err <- temp$se[keep]
		}
	    newtime <- times
	    }
	newtime <- newtime/scale
	if (length(ovars)) {    #matrix output
	    if (no.Y && israte){ # n's are all the same, so just send a vector
		dimnames(surv) <- list(NULL, levels(X))
		out <- list(call=call, surv=surv, n.risk=c(n.risk[,1]),
			    time=newtime)
		}
	    else {
		#Need a matrix of n's, and a strata component
		out <- list(call=call, surv=surv, n.risk=n.risk,
				time = newtime)
		tstrat <- rep(nrow(surv), ncol(surv))
		names(tstrat) <- levels(X)
		out$strata <- tstrat
		}
	    if (se.fit) out$std.err <- err
	    }
	else {
	     out <- list(call=call, surv=c(surv), n.risk=c(n.risk),
			   time=newtime)
	     if (se.fit) out$std.err <- c(err)
	     }

	na.action <- attr(m, "na.action")
	if (length(na.action))  out$na.action <- na.action
	if (model) out$model <- m
	else {
	    # NOTE(review): 'dimid' is not defined anywhere in this function,
	    #   so the next line should fail whenever x=TRUE.  It presumably
	    #   wants the rate table's "dimid" attribute -- needs a fix,
	    #   confirm against a later release of the package
	    if (x) out$x <- structure(cbind(X, R),
		dimnames=list(row.names(m), c("group", dimid)))
	    if (y) out$y <- Y
	    }
	if (israte && !is.null(rtemp$summ)) out$summ <- rtemp$summ
	if (no.Y) out$method <- 'exact'
	else if (conditional) out$method <- 'conditional'
	else                  out$method <- 'cohort'
	class(out) <- c('survexp', 'survfit')
	out
	}

    else { #individual survival
	if (no.Y) stop("For non-cohort, an observation time must be given")
	if (israte)
	    temp <- survexp.fit (cbind(1:n,R), Y, max(Y), TRUE, ratetable)
	else temp<- survexp.cfit(cbind(1:n,R), Y, FALSE, TRUE, ratetable, FALSE)
	xx <- temp$surv
	names(xx) <- row.names(m)
	na.action <- attr(m, "na.action")
	if (length(na.action)) naresid(na.action, xx)
	else xx
	}
    }
# SCCS @(#)survfit.coxph.null.s	5.5 07/09/00
survfit.coxph.null <-
  function(object, newdata, se.fit=TRUE, conf.int=.95, individual=FALSE,
	    type, vartype,
	    conf.type=c('log', 'log-log', 'plain', 'none'), ...) {
    # Survival curve for a "null" Cox model (one with no covariates).
    # The model may still have strata and/or offset terms; the linear
    #  predictor is then just the offset, so score = exp(offset).
    #  newdata doesn't make any sense
    #  This is survfit.coxph with lots of lines removed

    call <- match.call()
    Terms <- terms(object)
    strat <- attr(Terms, "specials")$strata
    n <- object$n
    score <- exp(object$linear.predictor)

    # Map the user-visible estimator names onto the integer codes used by
    #  the C routine: 1 = Kaplan-Meier/Kalbfleisch-Prentice family,
    #  2 = Aalen/Tsiatis/Breslow, 3 = Efron/Fleming-Harrington
    temp <- c('aalen', 'kalbfleisch-prentice', 'efron',
	           'tsiatis', 'breslow', 'kaplan-meier', 'fleming-harringon',
	           'greenwood', 'exact')
    temp2 <- c(2,1,3,2,2,1,3,1,1)
    if (missing(type)) type <- object$method
    if (missing(vartype)) vartype <- type
    method <- temp2[match(match.arg(type, temp), temp)]
    if (is.na(method)) stop("Invalid survival curve type")
    vartype <- temp2[match(match.arg(vartype, temp), temp)]
    if (is.na(vartype)) stop("Invalid variance type specified")

    if (!se.fit) conf.type <- 'none'
    else conf.type <- match.arg(conf.type)

    # Recover the response and the strata, rebuilding the model frame
    #  only when they were not saved in the fit
    y <- object$y
    stratx <- object$strata
    if (is.null(y) || (length(strat) && is.null(stratx))) {
	# I need the model frame
	m <- model.frame(object)
	if (is.null(stratx)) {
	    temp <- untangle.specials(Terms, 'strata', 1)
	    stratx <- strata(m[temp$vars])
	    }
	if (is.null(y)) y <- model.extract(m, 'response')
	}
    if (is.null(stratx)) stratx <- rep(1,n)
    # Per-stratum counts for the output object.  Computed here
    #  unconditionally: the original code set strata.all only inside the
    #  model-frame branch above, so the multi-strata output below failed
    #  with "object 'strata.all' not found" whenever the strata had been
    #  stored in the fit.
    if (length(strat)) strata.all <- table(stratx)
    ny <- ncol(y)
    if (nrow(y) != n) stop ("Mismatched lengths: logic error")

    # Sort within stratum by time, deaths before censorings; for right
    #  censored data prepend a dummy start-time column so one C routine
    #  can handle both data types
    type <- attr(y, 'type')
    if (type=='counting') {
	ord <- order(stratx, y[,2], -y[,3])
	# method is the integer code here; 1 == Kaplan-Meier.  (The
	#  original compared method to the string 'kaplan-meier', which
	#  could never match, so this guard was dead code.)
	if (method == 1)
	      stop ("KM method not valid for counting type data")
	}
    else if (type=='right') {
	ord <- order(stratx, y[,1], -y[,2])
        miny <- min(y[,1])
        if (miny < 0) y <- cbind(2*miny -1, y)
        else          y <- cbind(-1, y)
	}
    else stop("Cannot handle \"", type, "\" type survival data")

    # Flag vector for the C code: 1 at the last obs of each stratum,
    #  0 otherwise
    if (length(strat)) {
	newstrat <- (as.numeric(stratx))[ord]
	newstrat <- as.integer(c(1*(diff(newstrat)!=0), 1))
	}
    else newstrat <- as.integer(c(rep(0,n-1),1))

    if ( !missing(newdata))
	stop("A newdata argument does not make sense for a null model")

    dimnames(y) <- NULL   #I only use part of Y, so names become invalid
    storage.mode(y) <- 'double'
    surv <- .C('agsurv2', as.integer(n),
			  as.integer(0),
			  y = y[ord,],
			  as.double(score[ord]),
			  strata = as.integer(newstrat),
			  surv = double(n),
			  varhaz = double(n),
			  double(1),
			  as.double(0),
	                  nsurv = as.integer(c(method, vartype)),
			  double(2),
			  as.integer(1),
			  double(1),
			  newrisk= as.double(1), PACKAGE="survival")
    nsurv <- surv$nsurv[1]
    ntime <- 1:nsurv
    tsurv <- surv$surv[ntime]
    tvar  <- surv$varhaz[ntime]
    # surv$strata[1] holds the number of strata found by the C routine
    if (surv$strata[1] <=1)
	temp <- list(n=n, time=surv$y[ntime,1],
		 n.risk=surv$y[ntime,2],
		 n.event=surv$y[ntime,3],
		 surv=tsurv, type=type )
    else {
	temp <- surv$strata[1:(1+surv$strata[1])]
	tstrat <- diff(c(0, temp[-1])) #n in each strata
	names(tstrat) <- levels(stratx)
	temp <- list(n=n, time=surv$y[ntime,1],
		 n.risk=surv$y[ntime,2],
		 n.event=surv$y[ntime,3],
		 surv=tsurv,ntimes.strata=tstrat,
		 strata= tstrat, strata.all=strata.all, type=type)
	}
    if (se.fit) temp$std.err <- sqrt(tvar)

    # Attach pointwise confidence limits on the requested scale
    zval <- qnorm(1- (1-conf.int)/2, 0,1)
    if (conf.type=='plain') {
	temp1 <- temp$surv + zval* temp$std * temp$surv
	temp2 <- temp$surv - zval* temp$std * temp$surv
	temp <- c(temp, list(upper=pmin(temp1,1), lower=pmax(temp2,0),
			conf.type='plain', conf.int=conf.int))
	}
    if (conf.type=='log') {
	xx <- ifelse(temp$surv==0,1,temp$surv)  #avoid some "log(0)" messages
	temp1 <- ifelse(temp$surv==0, 0*temp$std, exp(log(xx) + zval* temp$std))
	temp2 <- ifelse(temp$surv==0, 0*temp$std, exp(log(xx) - zval* temp$std))
	temp <- c(temp, list(upper=pmin(temp1,1), lower=temp2,
			conf.type='log', conf.int=conf.int))
	}
    if (conf.type=='log-log') {
	who <- (temp$surv==0 | temp$surv==1) #special cases
	xx <- ifelse(who, .1,temp$surv)  #avoid some "log(0)" messages
	temp1 <- exp(-exp(log(-log(xx)) + zval*temp$std/log(xx)))
	temp1 <- ifelse(who, temp$surv + 0*temp$std, temp1)
	temp2 <- exp(-exp(log(-log(xx)) - zval*temp$std/log(xx)))
	temp2 <- ifelse(who, temp$surv + 0*temp$std, temp2)
	temp <- c(temp, list(upper=temp1, lower=temp2,
			conf.type='log-log', conf.int=conf.int))
	}

    temp$call <- call
    class(temp) <- c('survfit.cox', 'survfit')
    temp
    }









# SCCS @(#)survfit.coxph.s	5.6 07/09/00

survfit.coxph <-
  function(object, newdata, se.fit=TRUE, conf.int=.95, individual=FALSE,
	    type, vartype,
	    conf.type=c('log', 'log-log', 'plain', 'none'),
	    call = match.call()) {
    # Predicted survival curve(s) for a fitted Cox model.
    #   newdata:    covariate values for the predicted curve(s); defaults
    #               to the means of the fitting data
    #   individual: if TRUE, newdata describes one subject's covariate
    #               path over time (a single curve); otherwise each row
    #               of newdata produces a separate curve
    if(!is.null((object$call)$weights))
	stop("Survfit cannot (yet) compute the result for a weighted model")
    call <- match.call()
    Terms <- terms(object)
    strat <- attr(Terms, "specials")$strata
    cluster<-attr(Terms, "specials")$cluster
    if (length(cluster)) {
	temp <- untangle.specials(Terms, 'cluster')
	Terms <- Terms[-temp$terms]
	}
    resp <-  attr(Terms, "variables")[attr(Terms, "response")]
    n <- object$n
    nvar <- length(object$coef)
    score <- exp(object$linear.predictor)
    
    # Map the estimator names onto the integer codes used by the C
    #  routines: 1 = Kaplan-Meier/Kalbfleisch-Prentice family,
    #  2 = Aalen/Tsiatis/Breslow, 3 = Efron/Fleming-Harrington
    temp <- c('aalen', 'kalbfleisch-prentice', 'efron',
	           'tsiatis', 'breslow', 'kaplan-meier', 'fleming-harringon',
	           'greenwood', 'exact')
    temp2 <- c(2,1,3,2,2,1,3,1,1)
    if (missing(type)) type <- object$method
    if (missing(vartype)) vartype <- type
    method <- temp2[match(match.arg(type, temp), temp)]
    if (is.na(method)) stop("Invalid survival curve type")
    vartype <- temp2[match(match.arg(vartype, temp), temp)]
    if (is.na(vartype)) stop("Invalid variance type specified")
    if (!se.fit) conf.type <- 'none'
    else conf.type <- match.arg(conf.type)

    # Recreate a copy of the data
    #  (The coxph.getdata routine never returns cluster() terms).
    data <- coxph.getdata(object, y=TRUE, x=se.fit,
			           strata=(length(strat)))
    y <- data$y
    ny <- ncol(y)
    if (nrow(y) != n) stop ("Mismatched lengths: logic error")
    if (length(strat)) strata.all <- table(data$strata)

    # Get the sort index for the data, and add a column to y if
    #  necessary to make it of the "counting process" type  (I only
    #  wrote 1 C routine to handle both cases).
    #
    type <- attr(y, 'type')
    if (type=='counting') {
	# method is the integer code here; 1 == Kaplan-Meier.  (The
	#  original compared method to the string 'kaplan-meier', which
	#  could never match, so this guard was dead code.)
	if (method == 1)
	      stop ("KM method not valid for counting type data")
	if (length(strat)) ord <- order(data$strata, y[,2], -y[,3])
	else               ord <- order(y[,2], -y[,3]) 
        }
    else if (type=='right') {
	if (length(strat)) ord <- order(data$strata, y[,1], -y[,2])
	else               ord <- order(y[,1], -y[,2]) 
	miny <- min(y[,1])
	if (miny < 0) y <- cbind(2*miny -1, y)
	else          y <- cbind(-1, y)
	}
    else stop("Cannot handle \"", type, "\" type survival data")

    if (!is.null(data$x)) x <- data$x[ord,]
    else                  x <- 0
    # NOTE(review): 'weights' is computed below but never passed to the
    #  C routines; weighted fits are rejected at the top of the function,
    #  so this is currently dead code kept for future use
    if (is.null(object$weights))  weights <- rep(1,n)
    else                          weights <- object$weights[ord]

    # Create a 'nice' strata vector for the C code, 1 at the last obs of
    #   each strata, and 0 otherwise
    if (length(strat)) {
	newstrat <- (as.numeric(data$strata))[ord]
	newstrat <- as.integer(c(1*(diff(newstrat)!=0), 1))
	}
    else newstrat <- as.integer(rep(0,n))
    newstrat[n] <- 1

    # Which type of curve do I need?
    #  1: the new data gives a covariate path for a single individual,
    #          one curve as a result
    #  2: each line of new data is "covariates over all time", and 
    #          gives rise to a separate curve
    #
    if (individual && !missing(newdata)) stype <- 1
    else {
	stype <- 2
	# Don't need (or want) strata term if it is there
	if (length(strat)) {
	    temp <- untangle.specials(Terms, 'strata')
	    Terms <- Terms[-temp$terms]
	    }
	}
    if (stype==1 && method != vartype)
	    stop("The type and vartype args must agree for individual=T")
    if (stype==1 && method==1)
	    stop("Only Aalen and F-H estimates available for individual=T")

    #
    # Get the second, "new" data set.  By default the new curve is
    #  produced around the mean of the old ones.  It the new data set
    #  is missing, use the old data set along with the mean of the old
    #  data set, but NOT the mean of the old offset variables.
    #  
    offset2 <- 0   #offset variable for the new data set
    if (!missing(newdata)) {
	m2 <- model.newframe(Terms, newdata, response=(stype==1))
	if (!inherits(m2, 'data.frame'))  {
	    x2 <- as.matrix(m2)
	    if (ncol(x2) != nvar) stop ("Wrong # of variables in new data")
	    n2 <- nrow(x2)
	    if (stype==1) stop("Program error #3")
	    }

	else  {
	    x2 <- model.matrix(delete.response(Terms), m2)[,-1,drop=FALSE]
	    n2 <- nrow(x2)
	    offset2 <- model.extract(m2, 'offset')
	    if (is.null(offset2)) offset2 <- 0
	    if (stype==1) {
		#
		# The case of an agreg, with a multiple line newdata
		#
		strata.all <- object$n
		if (length(strat)) {
		    # NOTE(review): the original read levels(stratum), but
		    #  no variable 'stratum' exists in this scope; the
		    #  strata levels of the fitting data appear to be the
		    #  intended reference -- confirm against upstream
		    strata2 <- factor(x2[,strat], levels=levels(data$strata))
		    x2 <- x2[, -strat, drop=FALSE]
		    }
		else strata2 <- rep(1, nrow(x2))
		y2 <- model.extract(m2, 'response')
		if (attr(y2,'type') != type)
		    stop("Survival type of newdata does not match the fitted model")
		if (nrow(y2) != n2) stop("Wrong # of rows for Y")
		}
	    }
	}
    else x2 <- matrix(object$means, nrow=1)
    n2 <- nrow(x2)

    # Compute risk scores for the new subjects, relative to the centering
    #  point sum(coef * means) of the original fit
    coef <- ifelse(is.na(object$coef), 0, object$coef)
    newrisk <- exp(c(x2 %*% coef) + offset2 - sum(coef*object$means))

    dimnames(y) <- NULL   #I only use part of Y, so names become invalid
    storage.mode(y) <- 'double'
    ndead <- sum(y[,3])
    if (stype==1) {
	surv <- .C("agsurv1", as.integer(n),
			     as.integer(nvar),
			     y[ord,],
			     as.double(score[ord]),
			     strata=as.integer(newstrat),
			     surv=double(ndead*n2),
			     varhaz=double(ndead*n2),
			     nsurv=as.integer(method==3),
			     as.double(x),
			     double(3*nvar),
			     as.double(object$var),
			     y = double(3*n*n2),
			     as.integer(n2),
			     as.double(y2),
			     as.double(x2),
			     as.double(newrisk),
			     as.integer(strata2), PACKAGE="survival" )
	ntime <- 1:surv$nsurv
	temp <- (matrix(surv$y, ncol=3))[ntime,]
	temp <- list(n=n, time = temp[,1],
		     n.risk= temp[,2],
		     n.event=temp[,3],
		     surv = surv$surv[ntime],
		     type=type)
	if (se.fit) temp$std.err <- sqrt(surv$varhaz[ntime])
	}
    else {
	surv <- .C('agsurv2', as.integer(n),
			      as.integer(nvar* se.fit),
			      y = y[ord,],
			      as.double(score[ord]),
			      strata = as.integer(newstrat),
			      surv = double(ndead*n2),
			      varhaz = double(ndead*n2),
			      as.double(x),
			      as.double(object$var),
			      nsurv = as.integer(c(method, vartype)),
			      double(3*nvar),
			      as.integer(n2),
			      as.double(x2),
			      as.double(newrisk), PACKAGE="survival")
	nsurv <- surv$nsurv[1]
	ntime <- 1:nsurv
	# With several new subjects the curves come back as one long
	#  vector; fold them into an (ntime x n2) matrix
	if (n2>1) {
	    tsurv <- matrix(surv$surv[1:(nsurv*n2)], ncol=n2)
	    tvar  <- matrix(surv$varhaz[1:(nsurv*n2)], ncol=n2)
	    dimnames(tsurv) <- list(NULL, dimnames(x2)[[1]])
	    }
	else {
	    tsurv <- surv$surv[ntime]
	    tvar  <- surv$varhaz[ntime]
	    }
	# surv$strata[1] holds the number of strata found by the C routine
	if (surv$strata[1] <=1)
	    temp <- list(n=n,time=surv$y[ntime,1],
		     n.risk=surv$y[ntime,2],
		     n.event=surv$y[ntime,3],
		     surv=tsurv,
			type=type)
	else {
	    temp <- surv$strata[1:(1+surv$strata[1])]
	    tstrat <- diff(c(0, temp[-1])) #n in each strata
	    names(tstrat) <- levels(data$strata)
	    temp <- list(n=n, time=surv$y[ntime,1],
		     n.risk=surv$y[ntime,2],
		     n.event=surv$y[ntime,3],
		     surv=tsurv,
		     strata= tstrat, ntimes.strata=tstrat,
			strata.all=strata.all,
			type=type)
	    }
	if (se.fit) temp$std.err <- sqrt(tvar)
	}

    # Attach pointwise confidence limits on the requested scale
    zval <- qnorm(1- (1-conf.int)/2, 0,1)
    if (conf.type=='plain') {
	temp1 <- temp$surv + zval* temp$std * temp$surv
	temp2 <- temp$surv - zval* temp$std * temp$surv
	temp <- c(temp, list(upper=pmin(temp1,1), lower=pmax(temp2,0),
			conf.type='plain', conf.int=conf.int))
	}
    if (conf.type=='log') {
	xx <- ifelse(temp$surv==0,1,temp$surv)  #avoid some "log(0)" messages
	temp1 <- ifelse(temp$surv==0, 0*temp$std, exp(log(xx) + zval* temp$std))
	temp2 <- ifelse(temp$surv==0, 0*temp$std, exp(log(xx) - zval* temp$std))
	temp <- c(temp, list(upper=pmin(temp1,1), lower=temp2,
			conf.type='log', conf.int=conf.int))
	}
    if (conf.type=='log-log') {
	who <- (temp$surv==0 | temp$surv==1) #special cases
	xx <- ifelse(who, .1,temp$surv)  #avoid some "log(0)" messages
	temp1 <- exp(-exp(log(-log(xx)) + zval*temp$std/log(xx)))
	temp1 <- ifelse(who, temp$surv + 0*temp$std, temp1)
	temp2 <- exp(-exp(log(-log(xx)) - zval*temp$std/log(xx)))
	temp2 <- ifelse(who, temp$surv + 0*temp$std, temp2)
	temp <- c(temp, list(upper=temp1, lower=temp2,
			conf.type='log-log', conf.int=conf.int))
	}

    temp$call <- call
    class(temp) <- c('survfit.cox', 'survfit')
    temp
    }



#SCCS @(#)survfit.km.s	4.16 07/09/00
survfit.km <- function(x, y, casewt=rep(1,n),
		       type=c('kaplan-meier', 'fleming-harrington', 'fh2'),
		       error=c('greenwood', "tsiatis"), se.fit=TRUE,
		       conf.int= .95,
		       conf.type=c('log',  'log-log',  'plain', 'none'),
		       conf.lower=c('usual', 'peto', 'modified'),
		       new.start) {
    # Compute Kaplan-Meier (or Fleming-Harrington) survival curves.
    #   x:         a factor giving the stratum (curve) of each subject
    #   y:         a Surv object, right censored or counting process type
    #   casewt:    optional case weights
    #   new.start: optional start time; observations whose (last) time is
    #              before it are dropped before fitting
    # Returns the components of a 'survfit' object (the caller adds the
    #   class); the real work is done in the C routines survfit2/survfit3.
    type <- match.arg(type)
    method <- match(type, c("kaplan-meier", "fleming-harrington", "fh2"))

    error <- match.arg(error)
    error.int <- match(error, c("greenwood", "tsiatis"))
    conf.type <- match.arg(conf.type)
    conf.lower<- match.arg(conf.lower)

    if (!is.Surv(y)) stop("y must be a Surv object")
    if (!is.factor(x)) stop("x must be a factor")
    if (attr(y, 'type') != 'right' && attr(y, 'type') != 'counting')
	    stop("Can only handle right censored or counting data")

    ny.all <- ncol(y) # getting total number of observations and strata
    n.all <- nrow(y)  # before possible subsetting
    # Column ny.all-1 is the (stop) time for both right censored (2 col)
    #  and counting process (3 col) data
    sorted <- (1:n.all)[order(x, y[,ny.all-1])]
    strata.all.temp <- as.numeric(x[sorted])
    strata.all <- as.vector(table(strata.all.temp))

    if (!missing(new.start)) { # checking if not starting at first time
	keep <- (y[,ny.all-1] >= new.start)
	if (all(keep==FALSE))
		stop(paste("new.start =", new.start,
			   "is greater than all time points."))
	x <- x[keep]
	y <- y[keep,,drop=FALSE]
        }

    ny <- ncol(y) # getting working total observations and strata
    n <- nrow(y)
    # Re-sort after any subsetting: by stratum, then by (stop) time
    sorted <- (1:n)[order(x, y[,ny-1])]
    y <- y[sorted,]
    strata.temp <- as.numeric(x[sorted])
    newstrat <- 1
    if (n > 1) {
	# Flag vector for the C code: 1 at the last obs of each stratum
	newstrat <- as.integer(c(1*(diff(strata.temp)!=0), 1))
	if (sum(newstrat) > n/2)
		warning("Number of strata > number of observations/2")
        }
    if (method==3 && any(floor(casewt) != casewt))
	    stop("The fh2 method is not valid for fractional case weights")

    nstrat <- length(unique(strata.temp))

    if (attr(y, 'type') == 'right') # list of times is created differently 
	    times <- y[,1, drop=FALSE]  # depending on censoring
    else if (attr(y, 'type') == 'counting')
	    times <- cbind(y[,1],y[,2])

    # Sorted unique event/entry times within each stratum; ntimes.strata
    #  gives how many time points each stratum contributes
    sort.times <- tapply(times, strata.temp[row(times)], 
			 function(x) sort(unique(x)))
    ntimes.strata <- sapply(sort.times, length)
    times <- unlist(sort.times)

    if (attr(y, 'type') == 'right') # call correct surv program based 
	    surv <- .C("survfit2",  # on censoring 
		       as.integer(n),
		       y = as.double(y),
		       as.double(casewt[sorted]),
		       strata= as.integer(newstrat),
		       as.integer(method),
		       as.integer(error.int),
		       mark=double(n),
		       surv=double(n),
		       varhaz=double(n),
		       risksum=double(n), PACKAGE="survival")
    else if (attr(y, 'type') == 'counting')
	    surv <- .C("survfit3",
		       as.integer(n),
		       as.double(y),
		       as.double(casewt[sorted]),
		       strata=as.integer(newstrat),
		       as.integer(method),
		       as.integer(error.int),
		       as.integer(nstrat),
		       as.double(ntimes.strata),
		       y=as.double(times),
		       mark=double(length(times)),
		       surv=double(length(times)),
		       varhaz=double(length(times)),
		       risksum=double(length(times)),
		       enter=double(length(times)),
		       exit.censored=double(length(times)), PACKAGE="survival")
    ntime <- length(times)
    # Greenwood variance is undefined once the curve hits zero
    if (error.int==1) surv$varhaz[surv$surv==0] <- NA
    ntime <- 1:ntime

    # Assemble the output list; the single-stratum and multi-stratum
    #  cases differ only in the strata bookkeeping components
    if (attr(y, 'type') == 'right') {
	if (nstrat == 1)
		temp <- list(n=n.all, # total number of obs
			     time=surv$y[ntime],
			     n.risk=surv$risksum[ntime],
			     n.event=surv$mark[ntime],
			     surv=surv$surv[ntime],
			     type=attr(y, 'type')) # type of censoring
	else {
	    temp <- surv$strata[1:nstrat]
	    tstrat <- diff(c(0, temp)) # number in each strata
	    names(tstrat) <- levels(x)[1:nstrat] # number in strata may differ
	    names(strata.all) <- levels(x)
	    temp <- list(n=n.all,
			 time=surv$y[ntime],
			 n.risk=surv$risksum[ntime],
			 n.event=surv$mark[ntime],
			 surv=surv$surv[ntime],
			 type=attr(y, 'type'),
			 ntimes.strata=ntimes.strata,
			 strata=tstrat,
			 strata.all=strata.all)
	    }
        }
    else if (attr(y, 'type') == 'counting') {
	if (nstrat == 1) 
		temp <- list(n=n.all,
			     time=surv$y[ntime],
			     n.risk=surv$risksum[ntime],
			     n.event=surv$mark[ntime],
			     surv=surv$surv[ntime],
			     type=attr(y, 'type'),
			     enter=surv$enter[ntime],
			     exit.censored=surv$exit.censored[ntime])
	else {	
	    temp <-surv$strata[1:nstrat]
	    tstrat <- diff(c(0, temp))
	    names(tstrat) <- levels(x)[1:nstrat] # number of strata may differ
	    names(strata.all) <- levels(x)
	    temp <- list(n=n.all,
			 time=surv$y[ntime],
			 ntimes.strata=ntimes.strata,
			 n.risk=surv$risksum[ntime],
			 n.event=surv$mark[ntime],
			 surv=surv$surv[ntime],
			 type=attr(y, 'type'),
			 strata=tstrat,
			 strata.all=strata.all,
			 enter=surv$enter[ntime],
			 exit.censored=surv$exit.censored[ntime])
	    }
        }

    if (!missing(new.start))
	    temp$new.start <- new.start # user defined time to start

    if (se.fit) {
	std.err <- sqrt(surv$varhaz[ntime])
	temp$std.err <- std.err
	# n.lag carries the n.risk from the most recent event time
	#  forward, for the 'modified' lower limit of Dorey & Korn
	events <- temp$n.event >0
	n.lag <- rep(c(temp$n.risk[1], temp$n.risk[events]),
		     diff(c(ntime[1], ntime[events], 1+max(ntime))))
	std.low <- switch(conf.lower,
			  'usual' = std.err,
			  'peto' = sqrt((1-temp$surv)/ temp$n.risk),
			  'modified' = std.err * sqrt(n.lag/temp$n.risk))
	zval <- qnorm(1- (1-conf.int)/2, 0,1)

	if (conf.type=='plain') {
	    temp1 <- temp$surv + zval* std.err * temp$surv
	    temp2 <- temp$surv - zval* std.low * temp$surv
	    temp <- c(temp, list(upper=pmin(temp1,1), lower=pmax(temp2,0),
				 conf.type='plain', conf.int=conf.int))
	    }

	if (conf.type=='log') {
	    #avoid some "log(0)" messages
	    xx <- ifelse(temp$surv==0,1,temp$surv)  

	    temp1 <- ifelse(temp$surv==0, NA, exp(log(xx) + zval* std.err))
	    temp2 <- ifelse(temp$surv==0, NA, exp(log(xx) - zval* std.low))
	    temp <- c(temp, list(upper=pmin(temp1,1), lower=temp2,
				 conf.type='log', conf.int=conf.int))
	    }

	if (conf.type=='log-log') {
	    who <- (temp$surv==0 | temp$surv==1) #special cases
	    temp3 <- ifelse(temp$surv==0, NA, 1)
	    xx <- ifelse(who, .1,temp$surv)  #avoid some "log(0)" messages
	    temp1 <- exp(-exp(log(-log(xx)) + zval*std.err/log(xx)))
	    temp1 <- ifelse(who, temp3, temp1)
	    temp2 <- exp(-exp(log(-log(xx)) - zval*std.low/log(xx)))
	    temp2 <- ifelse(who, temp3, temp2)
	    temp <- c(temp, list(upper=temp1, lower=temp2,
				 conf.type='log-log', conf.int=conf.int))
	    }
        }
    temp
    }














#SCCS @(#)survfit.s	4.19 09/08/00
survfit <- function (formula, data, weights, subset, na.action, ...) {
    # Generic entry point for computing survival curves.
    #   - a formula with a Surv() response gives Kaplan-Meier style
    #     curves via survfit.km, one curve per stratum of the RHS
    #   - a model object (e.g. a coxph fit) dispatches to its method
    call <- match.call()
    # Real tricky -- find out if the first arg is "Surv(...)" without
    #  evaluating it.  If this is so, or it is a survival object, turn it
    #  into a formula
    # (This allows people to leave off the "~1" from a formula)
    if ((mode(call[[2]]) == 'call' &&  call[[2]][[1]] == as.name('Surv'))
		|| inherits(formula, 'Surv'))  {
        formula<-eval(parse(text=paste(deparse(call[[2]]),1,sep="~")))
        ## need to add back the formula environment
        environment(formula)<-parent.frame()
	}

    # if the first object is a Cox model, call survfit.coxph
    if (!inherits(formula, 'formula')) temp <- UseMethod("survfit")
    else {
	# Ok, I have a formula
        # grab the data and process it
	m <- match.call(expand=FALSE)
	m$... <- NULL    # drop the extra args before building the model frame

	Terms <- terms(formula, 'strata')
	ord <- attr(Terms, 'order')
	if (length(ord) & any(ord !=1))
	    stop("Interaction terms are not valid for this function")
	m$formula <- Terms
	m[[1]] <- as.name("model.frame")  # re-evaluate the call as model.frame()
	m <- eval(m, parent.frame())

	n <- nrow(m)
	Y <- model.extract(m, response)
	if (!is.Surv(Y)) stop("Response must be a survival object")

	casewt <- model.extract(m, "weights")
	# The second line below works around a bug in Splus 3.0.1, which later
	#    went away, i.e., casewt is returned as an unevaluated arg.
	if (is.null(casewt)) casewt <- rep(1,n)
	##else if (mode(casewt)=='argument') casewt <- eval(casewt[[1]])

	if (!is.null(attr(Terms, 'offset'))) warning("Offset term ignored")

	# Each combination of levels on the right hand side defines a curve
	ll <- attr(Terms, 'term.labels')
	if (length(ll) == 0) X <- factor(rep(1,n))  # ~1 on the right
	else X <- strata(m[ll])

	temp <- survfit.km(X, Y, casewt, ...)
	class(temp) <- "survfit"
	if (!is.null(attr(m, 'na.action'))) 
		temp$na.action <- attr(m, 'na.action')
	}
    temp$call <- call
    # added to change INFs to NAs for C program - cmb 8/25/2000
    ## I don't think we want this <TSL>
    ##if (any(is.inf(temp$std.err))) temp$std.err[is.inf(temp$std.err)] <- NA
    temp
    }

# The subscript function is bundled in here, although used most
#  often in plotting

"[.survfit" <- function(fit, ..., drop=FALSE) {
    # Subscript method for survfit objects.  The first subscript picks
    #  curves (strata, or columns of a matrix of curves); the second
    #  picks columns when the survival estimate is a matrix.
    i <- if (missing(..1)) NULL else ..1
    j <- if (missing(..2)) NULL else ..2

    if (is.null(fit$strata)) {
	if (!is.matrix(fit$surv))
	    warning("Survfit object has only a single survival curve")
	else {
	    # One stratum but several curves: select columns
	    fit$surv <- fit$surv[, i, drop=drop]
	    for (part in c("std.err", "upper", "lower")) {
		if (!is.null(fit[[part]]))
		    fit[[part]] <- fit[[part]][, i, drop=drop]
		}
	    }
	}
    else {
	if (is.null(i)) keep <- seq(along=fit$time)
	else {
	    # Per-stratum counts of time points; expand into one label
	    #  per time point, then keep the rows whose label matches i
	    counts <- if (is.null(fit$ntimes.strata)) fit$strata
		      else fit$ntimes.strata
	    labels <- if (is.character(i)) rep(names(fit$strata), counts)
		      else rep(1:length(fit$strata), counts)
	    keep <- which(labels %in% i)
	    # A single selected stratum no longer needs strata info
	    fit$strata <- if (length(i) <= 1) NULL else fit$strata[i]
	    if (!is.null(fit$ntimes.strata)) {
		fit$strata.all <- fit$strata.all[i]
		fit$ntimes.strata <- fit$ntimes.strata[i]
		}
	    fit$time    <- fit$time[keep]
	    fit$n.risk  <- fit$n.risk[keep]
	    fit$n.event <- fit$n.event[keep]
	    }
	if (is.matrix(fit$surv)) {
	    if (is.null(j)) {
		# Keep all columns, subset the rows
		fit$surv <- fit$surv[keep, , drop=drop]
		for (part in c("std.err", "upper", "lower")) {
		    if (!is.null(fit[[part]]))
			fit[[part]] <- fit[[part]][keep, , drop=drop]
		    }
		}
	    else {
		fit$surv <- fit$surv[keep, j]
		for (part in c("std.err", "upper", "lower")) {
		    if (!is.null(fit[[part]]))
			fit[[part]] <- fit[[part]][keep, j]
		    }
		}
	    }
	else {
	    fit$surv <- fit$surv[keep]
	    for (part in c("enter", "exit.censored", "std.err",
			   "upper", "lower")) {
		if (!is.null(fit[[part]]))
		    fit[[part]] <- fit[[part]][keep]
		}
	    }
	}
    fit
    }

basehaz <- function(fit, centered=TRUE) {
    # Baseline cumulative hazard of a Cox model, obtained from the fitted
    #  survival curve as H(t) = -log S(t).  With centered=FALSE the
    #  hazard is rescaled from the covariate means back to x = 0.
    if (!inherits(fit, "coxph"))
        stop("must be a coxph object")

    sfit <- survfit(fit)
    cumhaz <- -log(sfit$surv)

    # Expand the per-stratum counts into one label per time point
    strat <- sfit$strata
    if (!is.null(strat))
        strat <- rep(names(strat), strat)

    if (!centered) {
        # Undo the centering at the covariate means used by survfit
        center <- sum(fit$means * coef(fit))
        cumhaz <- cumhaz * exp(-center)
    }

    if (is.null(strat))
        data.frame(hazard=cumhaz, time=sfit$time)
    else
        data.frame(hazard=cumhaz, time=sfit$time, strata=strat)
}
# SCCS  @(#)survobrien.s	4.5 01/13/98
#
# The test for survival proposed by Peter O'Brien
#
survobrien <- function(formula, data= sys.frame(sys.parent())) {
    # O'Brien's logit-rank test for survival data.  At each distinct
    #  death time, every continuous covariate is re-expressed for the
    #  subjects still at risk as the logit of its rank-based percentile.
    #  The result is a counting-process style data frame
    #  (start, stop, event, covariates), suitable as input to coxph().

    unprotect<-function(Terms){
        # Recursively strip I() and factor() wrappers from the RHS of the
        #  formula so the underlying variable names can be recovered
        namei<-as.name("I")
        namefactor<-as.name("factor")
        unprotect1<-function(aterm){
            if (is.name(aterm))
                return(aterm)
            if (is.name(aterm[[1]])){
                if (aterm[[1]]==namei | aterm[[1]]==namefactor)
                    return(aterm[[2]])
                if (length(aterm)==1)
                    return(aterm)
                for (i in 2:length(aterm)){
                    aterm[[i]]<-unprotect1(aterm[[i]])
                }
            }
            aterm
        }
        if (length(Terms)==3)
            Terms[[3]]<-unprotect1(Terms[[3]])
        else
            Terms[[2]]<-unprotect1(Terms[[2]])
        terms.formula(Terms)
    }
    
    
  
    m <- model.frame(formula, data, na.action= function(x) x )
    n <- nrow(m)
    Terms <- attr(m, 'terms')

    y <- model.extract(m, 'response')
    if (!inherits(y, "Surv")) stop ("Response must be a survival object")
    if (attr(y, 'type') != 'right') stop("Can only handle right censored data")

    # Figure out which are the continuous predictor variables
    #  (factors and I()-protected terms are carried along unchanged)
    factors <- unlist(lapply(m, is.factor))
    protected <- unlist(lapply(m, function(x) inherits(x, "AsIs")))
    keepers <- factors | protected
    cont <- ((seq(keepers))[!keepers]) [-1]
    if (length(cont)==0) stop ("No continuous variables to modify")
    else {
	temp <- (names(m))[-1]      #ignore the response variable
	protected <- protected[-1]
	if (any(protected)| any(factors)) {
###       temp[protected] <- (attr(terms.inner(Terms), 'term.labels'))[protected]
          temp <- attr(unprotect(Terms),'term.labels')
        }
	kname <- temp[keepers[-1]]
      }

    ord <- order(y[,1])
    x <- as.matrix(m[ord, cont, drop=FALSE])
    time <- y[ord,1]
    status <- y[ord,2]
    nvar <- length(cont)

    # Total number of output rows: one per subject at risk, summed over
    #  the unique death times
    nline <- 0
    for (i in unique(time[status==1])) nline <- nline + sum(time >=i)
    start <- stop <- event <- double(nline)
    xx <- matrix(double(nline*nvar), ncol=nvar, 
		 dimnames=list(NULL, dimnames(x)[[2]]))
    ltime <- 0
    j<- 1
    keep.index <- NULL

    # Build one block of rows per unique death time i, covering the
    #  interval (previous death time, i]
    for (i in unique(time[status==1])) {
	who <- (time >=i)
	nrisk <- sum(who)

	# Rank-based percentiles of each covariate among those at risk,
	#  mapped to the logit scale
	temp <- apply(x[who,,drop=FALSE], 2, rank)
	temp <- (2*temp -1)/ (2* nrisk)   #percentiles
	logit<- log(temp/(1-temp))           #logits
	deaths <- (status[who]==1 & time[who]==i)

	k <- seq(from=j, length=nrisk)
	start[k] <- ltime
	stop[k] <-  i
	event[k] <- deaths
	xx[k,] <- logit
	j <- j + nrisk
	ltime <- i
	keep.index <- c(keep.index, ord[who])
	}

    # Reattach any factor or protected covariates, repeated to match the
    #  expanded rows
    if (any(keepers)){
	temp <- m[keep.index, keepers, drop=FALSE]
	names(temp) <- kname
	data.frame(start, stop, event, temp, xx)
        }
    else  data.frame(start, stop, event, xx)
    }
# 
#  SCCS @(#)survpenal.fit.s	1.6 02/08/99
# fit a penalized parametric model
#
survpenal.fit<- function(x, y, weights, offset, init, controlvals, dist, 
		       scale=0, nstrat=1, strata, pcols, pattr, assign,
			 parms=NULL) {

    iter.max <- controlvals$iter.max
    outer.max <- controlvals$outer.max
    eps <- controlvals$rel.tol
    toler.chol <- controlvals$toler.chol
    debug <- controlvals$debug

    if (!is.matrix(x)) stop("Invalid X matrix ")
    n <- nrow(x)
    nvar <- ncol(x) 
    ny <- ncol(y)
    if (is.null(offset)) offset <- rep(0,n)
    if (missing(weights)|| is.null(weights)) weights<- rep(1.0,n)
    else if (any(weights<=0)) stop("Invalid weights, must be >0")

    if (scale <0) stop("Invalid scale")
    if (scale >0 && nstrat >1) 
	    stop("Cannot have both a fixed scale and strata")
    if (nstrat>1 && (missing(strata) || length(strata)!= n))
	    stop("Invalid strata variable")
    if (nstrat==1) strata <- rep(1,n)
    if (scale >0)
      nstrat2 <- 0
    else
      nstrat2 <- nstrat

    if (is.character(dist)) {
	sd <- survreg.distributions[[dist]]
	if (is.null(sd)) stop ("Unrecognized distribution")
	}
    else sd <- dist
    dnum <- match(sd$name, c("Extreme value", "Logistic", "Gaussian"))
    fdensity<-function(z) {
      stop("this is just a placeholder and should never be called")
    }
    f.expr1<-function(coef) {
      stop("this is just a placeholder and should never be called")
    }
    f.expr2<-function(coef) {
      stop("this is just a placeholder and should never be called")
    }
    if (is.na(dnum)) {
	# Not one of the "built-in distributions
	dnum <- 4
	fitter <- c('survreg3', 'survreg5')
	#Set up the callback for the sparse frailty term
	n2 <- n + sum(y[,ny]==3)
	fdensity <- function(z){
	    if (length(parms)) temp <- sd$density(z, parms)
	    else               temp <- sd$density(z)
	    
	    if (!is.matrix(temp) || any(dim(temp) != c(n2,5)))
		    stop("Density function returned an invalid matrix")
	    survlist$density <- as.vector(as.double(temp))
	    survlist
          }
	survlist <- list(z=double(n2), density=double(n2*5))
	###.C("init_survcall", as.integer(sys.nframe()), expr1)
	}
    else fitter <- c('survreg2', 'survreg4')

    # This is a subset of residuals.survreg: define the first and second
    #   derivatives at z=0 for the 4 censoring types
    #   Used below for starting estimates
    derfun <- function(y, eta, sigma, density, parms) {
	ny <<- ncol(y)
	status <- y[,ny]
	z <- (y[,1] - eta)/sigma
	dmat <- density(z,parms)
	dtemp<- dmat[,3] * dmat[,4]    #f'
	if (any(status==3)) {
	    z2 <- (y[,2] - eta)/sigma
	    dmat2 <- density(z2)
	    }
	else {
	    dmat2 <- matrix(0,1,5)   #dummy values
	    z2 <- 0
	    }
	tdenom <- ((status==0) * dmat[,2]) +
		  ((status==1) * 1 )       +
		  ((status==2) * dmat[,1]) +
		  ((status==3) * ifelse(z>0, dmat[,2]-dmat2[,2], 
		                             dmat2[,1] - dmat[,1]))
	tdenom <- 1/(tdenom* sigma)
	dg <- -tdenom   *(((status==0) * (0-dmat[,3])) +
			  ((status==1) * dmat[,4]) + 
			  ((status==2) * dmat[,3]) +
			  ((status==3) * (dmat2[,3]- dmat[,3])))

	ddg <- (tdenom/sigma)*(((status==0) * (0- dtemp)) +
			       ((status==1) * dmat[,5]) +
			       ((status==2) * dtemp) +
			       ((status==3) * (dmat2[,3]*dmat2[,4] - dtemp))) 
	list(dg = dg, ddg = ddg - dg^2)
	}
    status <- y[,ny]

    #
    # are there any sparse frailty terms?
    # 
    npenal <- length(pattr)
    if (npenal == 0 || length(pcols) != npenal)
	    stop("Invalid pcols or pattr arg")
    sparse <- sapply(pattr, function(x) !is.null(x$sparse) &&  x$sparse)
    if (sum(sparse) >1) stop("Only one sparse penalty term allowed")

    #
    # Create a marking vector for the terms, the same length as assign
    #    with pterms == 0=ordinary term, 1=penalized, 2=sparse,
    #    pindex = length of pcols = position in pterms
    # 
    # Make sure that pcols is a strict subset of assign, so that the
    #   df computation (and printing) can unambiguously decide which cols of
    #   X are penalized and which are not when doing "terms" like actions.
    # To make some downstream things easier, order pcols and pattr to be
    #   in the same relative order as the terms in 'assign' 
    #
    ## if (missing(assign)) assign <- attr(x, 'assign')
    pterms <- rep(0, length(assign))
    names(pterms) <- names(assign)
    pindex <- rep(0, npenal)
    for (i in 1:npenal) {
	temp <- unlist(lapply(assign, function(x,y) (length(x) == length(y) &&
					     all(x==y)), pcols[[i]]))
	if (sparse[i]) pterms[temp] <- 2
	else pterms[temp] <- 1
	pindex[i] <- (seq(along=temp))[temp]
	}
    if ((sum(pterms==2) != sum(sparse)) || (sum(pterms>0) != npenal))
	    stop("pcols and assign arguments disagree")
    if (any(pindex != sort(pindex))) {
	temp <- order(pindex)
	pindex <- pindex[temp]
	pcols <- pcols[temp]
	pattr <- pattr[temp]
	}
    
    # ptype= 1 or 3 if a sparse term exists, 2 or 3 if a non-sparse exists
    ptype <- any(sparse) + 2*(any(!sparse))

    if (any(sparse)) {
	sparse.attr <- (pattr[sparse])[[1]]  #can't use [[sparse]] directly
	                                     # if 'sparse' is a T/F vector
	fcol <- unlist(pcols[sparse])
	if (length(fcol) > 1) stop("Sparse term must be single column")

	# Remove the sparse term from the X matrix
	frailx <- x[, fcol]
	x <- x[, -fcol, drop=FALSE]
	for (i in 1:length(assign)){
	    j <- assign[[i]]
	    if (j[1] > fcol) assign[[i]] <- j-1
	    }
	for (i in 1:npenal) {
	    j <- pcols[[i]]
	    if (j[1] > fcol) pcol[[i]] <- j-1
	    }

	frailx <- match(frailx, sort(unique(frailx)))
	nfrail <- max(frailx)
	nvar <- nvar - 1

	#Set up the callback for the sparse frailty term
	pfun1 <- sparse.attr$pfun
	f.expr1 <- function(coef){
            coxlist1$coef<-coef
	    if (is.null(extra1)) temp <- pfun1(coef, theta1, n.eff)
	    else  temp <- pfun1(coef, theta1, n.eff, extra1)

	    if (!is.null(temp$recenter)) 
		    coxlist1$coef <- coxlist1$coef - as.double(temp$recenter)
	    if (!temp$flag) {
		coxlist1$first <- -as.double(temp$first)
		coxlist1$second <- as.double(temp$second)
	        }
	    coxlist1$penalty <- -as.double(temp$penalty)
	    coxlist1$flag   <- as.logical(temp$flag)
	    if (any(sapply(coxlist1, length) != c(rep(nfrail,3), 1, 1)))
		    stop("Incorrect length in coxlist1")
	    coxlist1
          }
	coxlist1 <- list(coef=double(nfrail), first=double(nfrail), 
			 second=double(nfrail), penalty=0.0, flag=FALSE)
	###.C("init_coxcall1", as.integer(sys.nframe()), expr1)
	}
    else {
	frailx <- 0
	nfrail <- 0
	}
    nvar2 <- nvar + nstrat2

    # Now the non-sparse penalties
    if (sum(!sparse) >0) {
	full.imat <- !all(unlist(lapply(pattr, function(x) x$diag)))
	ipenal <- (1:length(pattr))[!sparse]   #index for non-sparse terms
	f.expr2 <- function(coef){
            coxlist2$coef<-coef
	    pentot <- 0
	    for (i in ipenal) {
		pen.col <- pcols[[i]]
		coef<-coxlist2$coef[pen.col]
		if (is.null(extralist[[i]]))
			temp <- ((pattr[[i]])$pfun)(coef, thetalist[[i]],n.eff)
		else    temp <- ((pattr[[i]])$pfun)(coef, thetalist[[i]],
						n.eff,extralist[[i]])
		if (!is.null(temp$recenter))
		    coxlist2$coef[pen.col] <- coxlist2$coef[pen.col]- 
			                               temp$recenter
		if (temp$flag) coxlist2$flag[pen.col] <- TRUE
		else {
		    coxlist2$flag[pen.col] <- FALSE
		    coxlist2$first[pen.col] <- -temp$first
		    if (full.imat) {
			tmat <- matrix(coxlist2$second, nvar2, nvar2)
			tmat[pen.col,pen.col] <- temp$second
			coxlist2$second <- c(tmat)
		        }
		    else coxlist2$second[pen.col] <- temp$second
		    }
		pentot <- pentot - temp$penalty
	        }
	    coxlist2$penalty <- as.double(pentot)
	    if (any(sapply(coxlist2, length) != length2)) 
		    stop("Length error in coxlist2")
	    coxlist2
          }
        ##if (debug) debug(f.expr2)
	if (full.imat) {
	    coxlist2 <- list(coef=double(nvar2), first=double(nvar2), 
		    second= double(nvar2*nvar2), penalty=0.0, flag=rep(FALSE,nvar2))
	    length2 <- c(nvar2, nvar2, nvar2*nvar2, 1, nvar2)
	    }  
	else {
	    coxlist2 <- list(coef=double(nvar2), first=double(nvar2),
		    second=double(nvar2), penalty= 0.0, flag=rep(FALSE,nvar2))
	    length2 <- c(nvar2, nvar2, nvar2, 1, nvar2)
	    }
	###.C("init_coxcall2", as.integer(sys.nframe()), expr2)
        }
    else full.imat <- FALSE

    #
    # "Unpack" the passed in parameter list, 
    #   and make the initial call to each of the external routines
    #
    cfun <- lapply(pattr, function(x) x$cfun)
    parmlist <- lapply(pattr, function(x,eps) c(x$cparm, eps2=eps), sqrt(eps))
    extralist<- lapply(pattr, function(x) x$pparm)
    iterlist <- vector('list', length(cfun))
    thetalist <- vector('list', length(cfun))
    printfun  <- lapply(pattr, function(x) x$printfun)
    for (i in 1:length(cfun)) {
	temp <- (cfun[[i]])(parmlist[[i]], iter=0)
	if (sparse[i]) {
	    theta1 <- temp$theta
	    extra1 <- extralist[[i]]
	    }
	thetalist[[i]] <- temp$theta
	iterlist[[i]] <- temp
	}

    #
    # Manufacture the list of calls to cfun, with appropriate arguments
    #
    temp1 <- c('x', 'coef', 'plik', 'loglik', 'status', 'neff',  'df', 'trH')
    temp2 <- c('frailx', 'fcoef', 'loglik',  'fit$loglik', 'status', 'n.eff')
    temp3 <- c('x[,pen.col]', 'coef[pen.col]','loglik',
	       'fit$loglik', 'status', 'n.eff')
    calls <- vector('expression', length(cfun))
    cargs <- lapply(pattr, function(x) x$cargs)
    for (i in 1:length(cfun)) {
	tempchar <- paste("(cfun[[", i, "]])(parmlist[[", i, "]], iter,",
			  "iterlist[[", i, "]]")
	temp2b <- c(temp2, paste('pdf[', i, ']'), paste('trH[', i, ']'))
	temp3b <- c(temp3, paste('pdf[', i, ']'), paste('trH[', i, ']'))
	if (length(cargs[[i]])==0) 
	    calls[i] <- parse(text=paste(tempchar, ")"))
	else {
	    temp <- match(cargs[[i]], temp1)
	    if (any(is.na(temp))) stop(paste((cargs[[i]])[is.na(temp)],
					    "not matched"))
	    if (sparse[i]) temp4 <- paste(temp2b[temp], collapse=',')
	    else           temp4 <- paste(temp3b[temp], collapse=',')
	    
	    calls[i] <- parse(text=paste(paste(tempchar,temp4,sep=','),')'))
	    }
        }
    need.df <- any(!is.na(match(c('df', 'trH'), unlist(cargs))))#do any use df?

    #
    # Last of the setup: create the vector of variable names
    #
    varnames <- dimnames(x)[[2]]
    for (i in 1:npenal) {
	if (!is.null(pattr[[i]]$varname))
		varnames[pcols[[i]]] <- pattr[[i]]$varname
        }

    #
    # Fit the model with just a mean and scale
    #    assume initial values and penalties don't apply here
    #
    meanonly <- (nvar==1 && all(x==1) && nfrail==0)
    if (meanonly) stop("Cannot fit a penalized 'mean only' model")

    yy <- ifelse(status !=3, y[,1], (y[,1]+y[,2])/2 )
    coef <- sd$init(yy, weights,parms)
    # We sometimes get into trouble with a small estimate of sigma,
    #  (the surface isn't SPD), but never with a large one.  Double it.
    if (scale >0) vars <- log(scale)
    else vars <- log(coef[2])/2 +.7  # init returns \sigma^2, I need log(sigma)
    coef <- c(coef[1], rep(vars, nstrat))
    # get a better initial value for the mean using the "glim" trick
    deriv <- derfun(y, yy, exp(vars), sd$density, parms)
    wt <-  -1*deriv$ddg*weights
    coef[1] <- sum(weights*deriv$dg + wt*(yy -offset)) / sum(wt)

    # Now the fit proper (intercept only)
    temp <- 1 +nstrat2
    fit0 <- .C(fitter[1],
	       iter = as.integer(iter.max),
	       as.integer(n),
	       as.integer(1),
	       as.double(y),
	       as.integer(ny),
	       rep(1.0, n),
	       as.double(weights),
	       as.double(offset),
	       coef= as.double(coef),
	       as.integer(nstrat2),
	       as.integer(strata),
	       u = double(3*(temp) + temp^2),
	       var = matrix(0.0, temp, temp),
	       loglik=double(1),
	       flag=integer(1),
	       as.double(eps),
	       as.double(toler.chol), 
	       as.integer(dnum),
	       debug = as.integer(floor(debug/2)), fdensity, environment(),
                    PACKAGE="survival")

    # The "effective n" of the model
    temp <-  mean(exp(fit0$coef[-1]))   #overall sd
    n.eff <- sd$var(temp^2) * (solve(fit0$var))[1,1]

    #
    # Fit the model with all covariates
    #   Start with initial values
    #
    nvar3 <- nvar2 + nfrail
    if (is.numeric(init)) {
	if (length(init) != nvar3) {
	    if (length(init) == nvar2) init <- c(rep(0,nfrail), init)
	    else stop("Wrong length for inital values")
	    }
	if (scale >0) init <- c(init, log(scale))
	}
    else  {
	# The algebra behind the 'glim' trick just doesn't work here
	#  Use the intercept fit + zeros
	#    coef order = frailty, intercept, other covariates, sigmas
	init <- c(rep(0, nfrail), fit0$coef[1], rep(0, nvar-1), fit0$coef[-1])
	}

    #
    # Tack on the sigmas to "assign", so that the df component includes
    #   the sigmas
    if (nstrat2 >0) assign <- c(assign, list(sigma=(1+nvar):nvar2))

    iter2 <- 0
    iterfail <- NULL
    thetasave <- unlist(thetalist)
    for (iterx in 1:outer.max) {
	fit <- .C(fitter[2],
		   iter = as.integer(iter.max),
		   as.integer(n),
		   as.integer(nvar),
		   as.double(y),
		   as.integer(ny),
		   as.double(x),
	           as.double(weights),
		   as.double(offset),
		   coef= as.double(init),
	           as.integer(nstrat2),
	           as.integer(strata),
		   u = double(3*(nvar3) + nvar2*nvar3),
		   hmat = double(nvar2*nvar3),
	           hinv = double(nvar2*nvar3),
		   loglik=double(1),
		   flag=integer(1),
		   as.double(eps),
	           as.double(toler.chol), 
		   as.integer(dnum),
	           debug = as.integer(debug),
	           as.integer(ptype),
		   as.integer(full.imat),
		   as.integer(nfrail),
		   as.integer(frailx),
	           fdiag = double(nvar3),f.expr1,f.expr2,fdensity, environment(),
                    PACKAGE="survival")

	if (debug>0) browser()
	iter <- iterx
	iter2 <- iter2 + fit$iter
	if (fit$iter >=iter.max) iterfail <- c(iterfail, iter)

	if (nfrail >0) {
	    fcoef <- fit$coef[1:nfrail]
	    coef  <- fit$coef[nfrail + 1:nvar2]
	    }
	else coef <- fit$coef[1:nvar2]

	# If any penalties were infinite, the C code has made fdiag=1 out
	#  of self-preservation (0 divides).  But such coefs are guaranteed
	#  zero so the variance should be too.)
	temp <- rep(FALSE, nvar2+nfrail)
	if (nfrail>0) temp[1:nfrail] <- coxlist1$flag
	if (ptype >1) temp[nfrail+ 1:nvar2] <- coxlist2$flag
	fdiag <- ifelse(temp, 0, fit$fdiag)

	if (need.df) {
            #get the penalty portion of the second derive matrix
	    if (nfrail>0) temp1 <- coxlist1$second
	    else 	  temp1 <- 0
	    if (ptype>1)  temp2 <- coxlist2$second
	    else          temp2 <- 0
					
	    dftemp <-coxpenal.df(matrix(fit$hmat, ncol=nvar2),  
			         matrix(fit$hinv, ncol=nvar2), fdiag, 
				 assign, ptype, nvar2,
		                 temp1, temp2, pindex[sparse])
	    df <- dftemp$df
	    var  <- dftemp$var
	    var2 <- dftemp$var2
	    pdf <- df[pterms>0]	          # df's for penalized terms
	    trH <- dftemp$trH[pterms>0]   # trace H 
	    }

	if (nfrail >0)  penalty <- -coxlist1$penalty
	else            penalty <- 0
	if (ptype >1) penalty <- penalty - coxlist2$penalty
	loglik <- fit$loglik + penalty  #C code returns PL - penalty
	if (iter==1) penalty0 <- penalty

	#
	# Call the control function(s)
	#
	done <- TRUE
	for (i in 1:length(cfun)) {
	    pen.col <- pcols[[i]]
	    temp <- eval(calls[i])
	    if (sparse[i]) theta1 <- temp$theta
	    thetalist[[i]] <- temp$theta
	    iterlist[[i]] <- temp
	    done <- done & temp$done
    	    }
	if (done) break

	# 
	# Choose starting estimates for the next iteration
	#
	if (iter==1) {
	    init <- coefsave <- fit$coef
	    thetasave <- cbind(thetasave, unlist(thetalist))
	    }
	else {
	    temp <- unlist(thetalist)
	    coefsave <- cbind(coefsave, fit$coef)
	    # temp = next guess for theta
	    # *save = prior thetas and the resultant fits
	    # choose as initial values the result for the closest old theta
	    howclose <- apply((thetasave-temp)^2,2, sum)
	    which <- min((1:iter)[howclose==min(howclose)])
	    init <- coefsave[,which]
	    thetasave <- cbind(thetasave, temp)
	    }
        }   #end of the iteration loop

    if (!need.df) {  #didn't need it iteration by iteration, but do it now
        #get the penalty portion of the second derive matrix
	if (nfrail>0) temp1 <- coxlist1$second
	else 	      temp1 <- 0
	if (ptype>1)  temp2 <- coxlist2$second
	else          temp2 <- 0
					
	dftemp <-coxpenal.df(matrix(fit$hmat,ncol=nvar2),  
			     matrix(fit$hinv,ncol=nvar2),  fdiag, 
		             assign, ptype, nvar2, 
		             temp1, temp2, pindex[sparse])
	df <- dftemp$df
	trH <- dftemp$trH
	var <- dftemp$var
	var2  <- dftemp$var2
        }

    if (iter.max >1 && length(iterfail)>0)
	    warning(paste("Inner loop failed to coverge for iterations", 
			  paste(iterfail, collapse=' ')))
    which.sing <- (fdiag[nfrail + 1:nvar] ==0)
    coef[which.sing] <- NA

    names(iterlist) <- names(pterms[pterms>0])
    cname <- varnames
    cname <- c(cname, rep("Log(scale)", nstrat2))
    dimnames(var) <- list(cname, cname)
    names(coef) <- cname

    if (nfrail >0) {
	lp <- offset + fcoef[frailx]
	lp <- lp + x %*%coef[1:nvar] 
	list(coefficients  = coef,
	     icoef = fit0$coef,
	     var    = var,
	     var2   = var2,
	     loglik = c(fit0$loglik, loglik),
	     iter   = c(iter, iter2),
	     linear.predictors = as.vector(lp),
	     frail = fcoef,
	     fvar  = dftemp$fvar,
	     df = df, 
	     penalty= c(penalty0, penalty),
	     pterms = pterms, assign2=assign,
	     history= iterlist,
	     printfun=printfun)
	}
    else {  #no sparse terms
	list(coefficients  = coef,
	     icoef = fit0$coef,
	     var    = var,
	     var2   = var2,
	     loglik = c(fit0$loglik, loglik),
	     iter   = c(iter, iter2),
	     linear.predictors = as.vector(x%*%coef[1:nvar]),
	     df = df, df2=dftemp$df2,
	     penalty= c(penalty0, penalty), 
	     pterms = pterms, assign2=assign,
	     history= iterlist,
	     printfun= printfun)
	}
    }
#SCCS  @(#)survreg.control.s	4.4 02/21/99
# Assemble the iteration-control list used by survreg() and survreg.fit().
# 'maxiter' and 'iter.max' are synonyms; when both are supplied, 'iter.max'
# wins.  All values are returned unchanged in a named list.
survreg.control <- function(maxiter=30, rel.tolerance=1e-9, failure=1,
			    toler.chol=1e-10, iter.max, debug=0,
			    outer.max = 10) {
    # keep the two iteration-count names synchronized
    if (missing(iter.max)) iter.max <- maxiter
    else maxiter <- iter.max

    list(iter.max = iter.max,
	 rel.tolerance = rel.tolerance,
	 failure = failure,
	 toler.chol = toler.chol,
	 debug = debug,
	 maxiter = maxiter,
	 outer.max = outer.max)
    }
# SCCS @(#)survreg.distributions.s	4.7 02/06/99
#
# Create the survreg.distributions object
#
# Master list of the distributions understood by survreg().
#
# A "real" distribution element contains:
#   name     - printable name (also used to select the compiled routine)
#   variance - variance of the standardized (scale = 1) distribution
#   init     - function(x, weights, ...) returning c(location, variance)
#              starting estimates from a weighted mean/variance of x
#   deviance - function(y, scale, parms) returning the saturated model's
#              per-observation centering value and loglik (for deviance
#              residuals); status code 1 = exact, 3 = interval censored
#   density  - function returning a 5-column matrix whose columns are
#              F(x), 1-F(x), f(x), f'(x)/f(x), f''(x)/f(x)
#   quantile - the quantile function
#
# The remaining elements define a distribution as a transformation of one
# of the real ones:
#   dist   - name of the parent element
#   trans  - transformation applied to the response (log, here)
#   dtrans - derivative of trans, used for the loglik correction
#   itrans - inverse transformation
#   scale  - optional fixed scale (exponential = 1, rayleigh = 0.5)
#
# The whole list is wrapped in local() so it is built once at load time.
survreg.distributions <- local({list(
'extreme' = list(
    name = "Extreme value",
    variance = function(parm) pi^2/6,
    init  = function(x, weights, ...) {
	# weighted mean/variance, shifted by ~Euler's constant (.572) and
	# scaled by ~pi^2/6 (1.64) to match the extreme value distribution
	mean <- sum(x*weights)/ sum(weights)
	var  <- sum(weights*(x-mean)^2)/ sum(weights)
	c(mean + .572, var/1.64)
	},
    deviance= function(y, scale, parms) {
	status <- y[,ncol(y)]
	# interval-censored obs use the interval width; others a dummy of 1
	width <- ifelse(status==3,(y[,2] - y[,1])/scale, 1)
	temp <- width/(exp(width)-1)
	center <- ifelse(status==3, y[,1] - log(temp), y[,1])
	temp3 <- (-temp) + log(1- exp(-exp(width)))
	best <- ifelse(status==1, -(1+log(scale)),
				    ifelse(status==3, temp3, 0))
	list(center=center, loglik=best) 
	},
    density = function(x,parms) {
	# F(x) = 1 - exp(-exp(x)); columns are F, 1-F, f, f'/f, f''/f
	w <- exp(x)
	ww <- exp(-w)
	cbind(1-ww, ww, w*ww, (1-w), w*(w-3) +1)
	},
    quantile = function(p,parms) log(-log(1-p))
    ),

logistic = list(
    name  = "Logistic",
    variance = function(parm) pi^2/3,
    init  = function(x, weights, ...) {
	# variance of the standard logistic is pi^2/3 ~ 3.2
	mean <- sum(x*weights)/ sum(weights)
	var  <- sum(weights*(x-mean)^2)/ sum(weights)
	c(mean, var/3.2)
	},
    deviance= function(y, scale, parms) {
	status <- y[,ncol(y)]
	width <- ifelse(status==3,(y[,2] - y[,1])/scale, 0)
	center <- y[,1] - width/2
	temp2 <- ifelse(status==3, exp(width/2), 2) #avoid a log(0) message
	temp3 <- log((temp2-1)/(temp2+1))
	best <- ifelse(status==1, -log(4*scale),
				    ifelse(status==3, temp3, 0))
	list(center=center, loglik=best) 
	},
    density = function(x, parms) {
	# F(x) = w/(1+w) with w = exp(x); columns F, 1-F, f, f'/f, f''/f
	w <- exp(x)
	cbind(w/(1+w), 1/(1+w), w/(1+w)^2, (1-w)/(1+w), (w*(w-4) +1)/(1+w)^2)
	},
    quantile = function(p, parms) log(p/(1-p))
    ),

gaussian = list(
    name  = "Gaussian",
    variance = function(parm) 1,
    init  = function(x, weights, ...) {
	mean <- sum(x*weights)/ sum(weights)
	var  <- sum(weights*(x-mean)^2)/ sum(weights)
	c(mean, var)
	},
    deviance= function(y, scale, parms) {
	status <- y[,ncol(y)]
	width <- ifelse(status==3,(y[,2] - y[,1])/scale, 0)
	center <- y[,1] - width/2
	temp2 <- log(1 - 2*pnorm(width/2))
	best <- ifelse(status==1, -log(sqrt(2*pi)*scale),
				ifelse(status==3, temp2, 0))
	list(center=center, loglik=best) 
	},
    density = function(x, parms) {
	# standard normal: columns F, 1-F, f, f'/f = -x, f''/f = x^2-1
	cbind(pnorm(x), pnorm(-x), dnorm(x), -x, x^2-1)	
	},
    quantile = function(p, parms) qnorm(p)
    ),

# log-scale transformations of the extreme value distribution
weibull = list(
    name  = "Weibull",
    dist  = 'extreme',
    trans = function(y) log(y),
    dtrans= function(y) 1/y ,
    itrans= function(x) exp(x)
    ),

exponential = list(
    name  = "Exponential",
    dist  = 'extreme',
    trans = function(y) log(y),
    dtrans= function(y) 1/y,
    scale =1,
    itrans= function(x) exp(x)
    ),

rayleigh = list(
    name  = "Rayleigh",
    dist  = 'extreme',
    trans = function(y) log(y),
    dtrans= function(y) 1/y,
    itrans= function(x) exp(x),
    scale =0.5
    ),

# two names for the same log-scale Gaussian
loggaussian = list(
    name  = "Log Normal",
    dist  = 'gaussian',
    trans = function(y) log(y),
    itrans= function(x) exp(x),
    dtrans= function(y) 1/y
    ),

lognormal = list(
    name  = "Log Normal",
    dist  = 'gaussian',
    trans = function(y) log(y),
    itrans= function(x) exp(x),
    dtrans= function(y) 1/y
    ),

loglogistic = list(
    name = "Log logistic",
    dist = 'logistic',
    trans = function(y) log(y),
    dtrans= function(y) 1/y ,
    itrans= function(x) exp(x)
    ),

t = list(
    name  = "Student-t",
    variance = function(df) df/(df-2),
    parms = list(df=4),   # default degrees of freedom
    init  = function(x, weights, df) {
	if (df <=2) stop ("Invalid degrees of freedom for the t-distribution")
	mean <- sum(x*weights)/ sum(weights)
	var  <- sum(weights*(x-mean)^2)/ sum(weights)
	c(mean, var*(df-2)/df)
	},
    deviance= function(y, scale, parms) {
	status <- y[,ncol(y)]
	width <- ifelse(status==3,(y[,2] - y[,1])/scale, 0)
	center <- y[,1] - width/2
	temp2 <- log(1 - 2*pt(width/2, df=parms))
	best <- ifelse(status==1, -log(dt(0, df=parms)*scale),
				ifelse(status==3, temp2, 0))
	list(center=center, loglik=best) 
	},
    density = function(x, df) {
	# t with df degrees of freedom: columns F, 1-F, f, f'/f, f''/f
	cbind(pt(x, df), pt(-x, df), dt(x,df),
	      -(df+1)*x/(df+x^2), 
	      (df+1)*(x^2 *(df+3)/(df+x^2) - 1)/(df +x^2))
	},
    quantile = function(p, df) qt(p, df)
  )
)


})



# 
#  SCCS @(#)survreg.fit.s	5.10 07/10/00
#
# Fit a parametric survival (accelerated failure time) model by maximum
# likelihood, using the compiled routine survreg2 (built-in densities) or
# survreg3 (user-written density evaluated through an R callback).
#
#  x           model matrix, n rows by nvar columns
#  y           Surv() response matrix; the last column is the status code,
#              and for interval data column 2 holds the second time
#  weights     case weights, all > 0 (default all 1)
#  offset      offset vector (default all 0)
#  init        numeric vector of starting coefficients, or anything else
#              to have starting values computed internally
#  controlvals iteration controls, passed through survreg.control()
#  dist        name of an element of survreg.distributions, or a
#              distribution object (list) itself
#  scale       fixed value for the scale parameter; 0 = estimate it
#  nstrat      number of strata, i.e. number of separate scales
#  strata      per-observation stratum index, required when nstrat > 1
#  parms       optional ancillary parameter(s) for the density, e.g. 'df'
#              for the Student-t distribution
#
# Returns a list (coefficients, icoef, var, loglik, iter,
# linear.predictors, df), or a character error message when the
# iteration fails and controlvals$failure == 2.
survreg.fit<- function(x, y, weights, offset, init, controlvals, dist, 
		       scale=0, nstrat=1, strata, parms=NULL) {

    controlvals<-do.call("survreg.control",controlvals)
    iter.max <- controlvals$iter.max
    # (was controlvals$rel.tol, relying on partial matching; spelled out)
    eps <- controlvals$rel.tolerance
    toler.chol <- controlvals$toler.chol
    debug <- controlvals$debug

    if (!is.matrix(x)) stop("Invalid X matrix ")
    n <- nrow(x)
    nvar <- ncol(x)
    ny <- ncol(y)
    if (is.null(offset)) offset <- rep(0,n)
    if (missing(weights)|| is.null(weights)) weights<- rep(1.0,n)
    else if (any(weights<=0)) stop("Invalid weights, must be >0")

    if (scale <0) stop("Invalid scale")
    if (scale >0 && nstrat >1) 
	    stop("Cannot have both a fixed scale and strata")
    if (nstrat>1 && (missing(strata) || length(strata)!= n))
	    stop("Invalid strata variable")
    if (nstrat==1) strata <- rep(1,n)
    # nstrat2 = number of scale parameters actually estimated
    if (scale >0)
        nstrat2 <- 0
    else
        nstrat2 <- nstrat

    if (is.character(dist)) {
	sd <- survreg.distributions[[dist]]
	if (is.null(sd)) stop ("Unrecognized distribution")
	}
    else sd <- dist
    if (!is.function(sd$density)) 
	stop("Missing density function in the definition of the distribution")
    dnum <- match(sd$name, c("Extreme value", "Logistic", "Gaussian"))
    if (is.na(dnum)) {
	# Not one of the "built-in" distributions: use the slower survreg3
	#  routine, which calls back into R for density values
	dnum <- 4
	fitter <- 'survreg3'
	# number of density evaluations: interval-censored observations
	#  need both endpoints
	n2 <- n + sum(y[,ny]==3)
	f.expr1 <- function(z){
            
	    if (length(parms)) temp <- sd$density(z, parms)
	    else               temp <- sd$density(z)
	    
	    if (!is.matrix(temp) || any(dim(temp) != c(n2,5)))
		    stop("Density function returned an invalid matrix")
	    list(z=z,density=as.vector(as.double(temp)))}
        
	survlist <- list(z=double(n2), density=double(n2*5))
	###.C("init_survcall", expr1,PACKAGE="survival")
	}
    else {
        fitter <- 'survreg2'
        f.expr1<-function(z) NULL
    }
    ##
    ## environment for callbacks
    rho<-environment()
    # This is a subset of residuals.survreg: define the first and second
    #   derivatives at z=0 for the 4 censoring types
    #   Used below for starting estimates
    derfun <- function(y, eta, sigma, density, parms) {
	ny <- ncol(y)
	status <- y[,ny]
	z <- (y[,1] - eta)/sigma
	dmat <- density(z,parms)
	dtemp<- dmat[,3] * dmat[,4]    #f'
	if (any(status==3)) {
	    # interval censored: also need the density at the upper endpoint
	    z2 <- (y[,2] - eta)/sigma
	    # bug fix: 'parms' was not passed here, which fails for any
	    #  density (such as the t) whose extra argument has no default
	    dmat2 <- density(z2, parms)
	    }
	else {
	    dmat2 <- matrix(0,1,5)   #dummy values
	    z2 <- 0
	    }
	tdenom <- ((status==0) * dmat[,2]) +
		  ((status==1) * 1 )       +
		  ((status==2) * dmat[,1]) +
		  ((status==3) * ifelse(z>0, dmat[,2]-dmat2[,2], 
		                             dmat2[,1] - dmat[,1]))
	tdenom <- 1/(tdenom* sigma)
	dg <- -tdenom   *(((status==0) * (0-dmat[,3])) +
			  ((status==1) * dmat[,4]) + 
			  ((status==2) * dmat[,3]) +
			  ((status==3) * (dmat2[,3]- dmat[,3])))

	ddg <- (tdenom/sigma)*(((status==0) * (0- dtemp)) +
			       ((status==1) * dmat[,5]) +
			       ((status==2) * dtemp) +
			       ((status==3) * (dmat2[,3]*dmat2[,4] - dtemp))) 
	list(dg = dg, ddg = ddg - dg^2)
	}

    #
    # Fit the model with just a mean and scale
    #    assume initial values don't apply here
    # Unless, of course, someone is fitting a mean only model!
    #
    meanonly <- (nvar==1 && all(x==1))
    if (!meanonly) {
	# interval-censored responses are represented by their midpoint
	yy <- ifelse(y[,ny]!=3, y[,1], (y[,1]+y[,2])/2 )
	coef <- sd$init(yy, weights, parms)
	#init returns \sigma^2, I need log(sigma)
	# We sometimes get into trouble with a small estimate of sigma,
	#  (the surface isn't SPD), but never with a large one.  Double it.
	if (scale >0) vars <- log(scale)
	else vars <- log(coef[2])/2  +.7
	coef <- c(coef[1], rep(vars, nstrat))
	
	# get a better initial value for the mean using the "glim" trick
	deriv <- derfun(y, yy, exp(vars), sd$density, parms)
	wt <-  -1*deriv$ddg*weights
	coef[1] <- sum(weights*deriv$dg + wt*(yy -offset)) / sum(wt)

	# Now the fit proper (intercept only)
	nvar2 <- 1 +nstrat2
	fit0 <- .C(fitter,
		       iter = as.integer(iter.max),
		       as.integer(n),
		       as.integer(1),
		       as.double(y),
		       as.integer(ny),
		       as.double(rep(1.0, n)),
		       as.double(weights),
		       as.double(offset),
		       coef= as.double(coef),
		       as.integer(nstrat2),
		       as.integer(strata),
		       u = double(3*(nvar2) + nvar2^2),
		       var = matrix(0.0, nvar2, nvar2),
		       loglik=double(1),
		       flag=integer(1),
		       as.double(eps),
		       as.double(toler.chol), 
		       as.integer(dnum),
		       debug = as.integer(floor(debug/2)),
                       Rexpr=f.expr1, Renv=rho,
                   PACKAGE="survival")
	}

    #
    # Fit the model with all covariates
    #
    nvar2 <- nvar + nstrat2
    if (is.numeric(init)) {
	if (length(init) != nvar2) stop("Wrong length for initial parameters")
	if (scale >0) init <- c(init, log(scale))
	}
    else  {
	# Do the 'glim' method of finding an initial value of coef
	if (meanonly) {
	    yy <- ifelse(y[,ny]!=3, y[,1], (y[,1]+y[,2])/2 )
	    coef <- sd$init(yy, weights, parms)
	    if (scale >0) vars <- rep(log(scale), nstrat)
	    else vars  <- rep(log(coef[2])/2 + .7, nstrat)  
	    }
	else vars <- fit0$coef[-1]
	eta <- yy - offset     #what would be true for a 'perfect' model

	deriv <- derfun(y, yy, exp(vars[strata]), sd$density, parms)
	wt <-  -1*deriv$ddg*weights
	# one step of weighted least squares for the regression coefficients
	coef <- coxph.wtest(t(x)%*% (wt*x), 
		       c((wt*eta + weights*deriv$dg)%*% x),
			    toler=toler.chol)$solve
	init <- c(coef, vars)
	}

    # Now for the fit in earnest

    fit <- .C(fitter,
		   iter = as.integer(iter.max),
		   n = as.integer(n),
		   as.integer(nvar),
		   as.double(y),
		   as.integer(ny),
		   as.double(x),
	           as.double(weights),
		   as.double(offset),
		   coef= as.double(init),
	           as.integer(nstrat2),
	           as.integer(strata),
		   u = double(3*(nvar2) + nvar2^2),
		   var = matrix(0.0, nvar2, nvar2),
		   loglik=double(1),
		   flag=integer(1),
		   as.double(eps),
	           as.double(toler.chol), 
		   as.integer(dnum),
                   debug = as.integer(debug),
              Rexpr=f.expr1, Renv=rho,
              PACKAGE="survival")

    if (debug>0) browser()
    if (iter.max >1 && fit$flag > nvar2) {
	if (controlvals$failure==1)
	       warning("Ran out of iterations and did not converge")
	else if (controlvals$failure==2)
	       return("Ran out of iterations and did not converge")
	}

    cname <- dimnames(x)[[2]]
    if (is.null(cname)) cname <- paste("x", 1:ncol(x))
    if (scale==0) cname <- c(cname, rep("Log(scale)", nstrat))
    dimnames(fit$var) <- list(cname, cname)
    # with a fixed scale the coefficient vector carries an extra element
    if (scale>0) fit$coef <- fit$coef[1:nvar2]
    names(fit$coef) <- cname

    if (meanonly) {
	coef0 <- fit$coef
	loglik <- rep(fit$loglik,2)
	}
    else {
	coef0 <- fit0$coef
	names(coef0) <- c("Intercept", rep("Log(scale)", nstrat))
	loglik <- c(fit0$loglik, fit$loglik)
	}
    temp <- list(coefficients   = fit$coef,
		 icoef  = coef0, 
		 var    = fit$var,
		 loglik = loglik, 
		 iter   = fit$iter,
		 linear.predictors = c(x %*% fit$coef[1:nvar]),	
		 df     = length(fit$coef)
		 )
    if (debug>0) {
	# also return the score vector and the sandwich-style variance
	temp$u <- fit$u[1:nvar2]
	JJ     <- matrix(fit$u[-seq(1, 3*nvar2)], nvar2, nvar2)
	temp$JJ <- JJ
	temp$var2 <- fit$var %*% JJ %*% fit$var
	}

    temp
    }
#  SCCS  @(#)survreg.old.s	1.4 02/21/99
# Map the argument list of the old survreg to the new one
#
# Map the argument list of the old survreg() to the new one.
#   link/dist pairs of the old function become a single distribution name:
#   a log link turns 'extreme' into 'weibull' and prefixes the others with
#   'log'.  'weibull', 'rayleigh', and 'exponential' already incorporate
#   the log transform and are left alone.
# A fixed scale may be supplied via fixed$scale.
survreg.old <- function(formula, data=sys.frame(sys.parent()), ...,
        link=c('log',"identity"),
        dist=c("extreme", "logistic", "gaussian", "exponential",
               "rayleigh", "weibull"),
	fixed=list()) {
    
    dist <- match.arg(dist)
    link <- match.arg(link)
    
    # Bug fix: 'exponential' was missing from the exclusion list, which
    #  produced the nonexistent distribution name 'logexponential'
    if (link=='log' && !(dist %in% c('weibull', 'rayleigh', 'exponential'))) {
	if (dist=='extreme') dist <- 'weibull'
	else dist <- paste('log', dist, sep='')
	}
    if (is.null(fixed$scale)) scale <- 0
    else scale <- fixed$scale

    survreg(formula, data, ..., dist=dist, scale=scale)
    }
#
# SCCS @(#)survreg.s	5.8 07/10/00
#  The newest version of survreg, that accepts penalties and strata
#

# User-level interface for parametric survival (accelerated failure time)
# regression, accepting penalty terms and strata.
#
#   formula   model formula with a Surv() response
#   data      data frame in which to evaluate the formula
#   weights, subset, na.action  usual model-frame arguments
#   dist      distribution name (see survreg.distributions) or object
#   init      optional starting coefficients, passed to the fitters
#   scale     fixed scale value; 0 = estimate the scale(s)
#   control   iteration controls, see survreg.control
#   parms     optional ancillary density parameters (e.g. df for the t)
#   model, x, y  include the model frame / matrix / response in the result
survreg <- function(formula=formula(data), data=parent.frame(),
	weights, subset, na.action, dist='weibull', 
	init=NULL,  scale=0, control=survreg.control(), parms=NULL, 
	model=FALSE, x=FALSE, y=TRUE, ...) {

    call <- match.call()
    # keep only the arguments that model.frame understands
    # (was match.call(expand=FALSE): a partial match for 'expand.dots')
    m <- match.call(expand.dots=FALSE)
    temp <- c("", "formula", "data", "weights", "subset", "na.action")
    m <- m[ match(temp, names(m), nomatch=0)]
    m[[1]] <- as.name("model.frame")
    special <- c("strata", "cluster")
    Terms <- if(missing(data)) terms(formula, special)
             else              terms(formula, special, data=data)
    m$formula <- Terms
    m <- eval(m, parent.frame())
    ### I commented this out last time -- don't know why
    ###Terms <- attr(m, 'terms')

    weights <- model.extract(m, 'weights')
    Y <- model.extract(m, "response")
    if (!inherits(Y, "Surv")) stop("Response must be a survival object")

    strats <- attr(Terms, "specials")$strata
    cluster<- attr(Terms, "specials")$cluster
    dropx <- NULL
    if (length(cluster)) {
        # Bug fix: 'robust' is not a formal argument of survreg, and
        #  missing() is only legal on formals, so the old line
        #  "if (missing(robust)) robust <- TRUE" was an error whenever a
        #  cluster() term appeared.
        # NOTE(review): 'robust' is never referenced again in this
        #  function -- this looks copied from coxph; confirm intent.
        robust <- TRUE
        tempc <- untangle.specials(Terms, 'cluster', 1:10)
        ord <- attr(Terms, 'order')[tempc$terms]
        if (any(ord>1)) stop ("Cluster can not be used in an interaction")
        cluster <- strata(m[,tempc$vars], shortlabel=TRUE)  #allow multiples
        dropx <- tempc$terms
        }
    if (length(strats)) {
        temp <- untangle.specials(Terms, 'strata', 1)
        dropx <- c(dropx, temp$terms)
        if (length(temp$vars)==1) strata.keep <- m[[temp$vars]]
        else strata.keep <- strata(m[,temp$vars], shortlabel=TRUE)
        strata <- as.numeric(strata.keep)
	nstrata <- max(strata)
        }
    else {
	nstrata <- 1
	strata <- 0
	}

    # remove strata/cluster terms from the design matrix
    if (length(dropx)) newTerms<-Terms[-dropx]
    else               newTerms<-Terms
    X<-model.matrix(newTerms,m)
    
    n <- nrow(X)
    nvar <- ncol(X)

    offset<- attr(Terms, "offset")
    if (!is.null(offset)) offset <- as.numeric(m[[offset]])
    else                  offset <- rep(0, n)

    # resolve the distribution argument into a distribution object, and
    #  validate its structure
    if (is.character(dist)) {
	dlist <- survreg.distributions[[dist]]
	if (is.null(dlist)) stop(paste(dist, ": distribution not found"))
	}
    else if (is.list(dist)) dlist <- dist
    else stop("Invalid distribution object")
    if (is.null(dlist$dist)) {
	if (is.character(dlist$name) && is.function(dlist$init) &&
	    is.function(dlist$deviance)) {}
	else stop("Invalid distribution object")
	}
    else {
	if (!is.character(dlist$name) || is.null(dlist$dist) ||
	    !is.function(dlist$trans) || !is.function(dlist$dtrans))
		stop("Invalid distribution object")
	}	

    type <- attr(Y, "type")
    if (type== 'counting') stop ("Invalid survival type")
    
    logcorrect <- 0   #correction to the loglik due to transformations
    if (!is.null(dlist$trans)) {
	tranfun <- dlist$trans
	exactsurv <- Y[,ncol(Y)] ==1
	if (any(exactsurv)) logcorrect <-sum(log(dlist$dtrans(Y[exactsurv,1])))

	if (type=='interval') {
	    if (any(Y[,3]==3))
		    Y <- cbind(tranfun(Y[,1:2]), Y[,3])
	    else Y <- cbind(tranfun(Y[,1]), Y[,3])
	    }
	else if (type=='left')
	     Y <- cbind(tranfun(Y[,1]), 2-Y[,2])
	else     Y <- cbind(tranfun(Y[,1]), Y[,2])
	if (!all(is.finite(Y))) 
	    stop("Invalid survival times for this distribution")
	}
    else {
	# no transformation: recode left censoring, simplify interval data
	if (type=='left') Y[,2] <- 2- Y[,2]
	else if (type=='interval' && all(Y[,3]<3)) Y <- Y[,c(1,3)]
	}

    if (is.null(dlist$itrans)) itrans <- function(x) x
    else itrans <- dlist$itrans

    if (!is.null(dlist$scale)) {
	if (!missing(scale)) warning(paste(dlist$name, 
			   "has a fixed scale, user specified value ignored"))
	scale <- dlist$scale
	}
    # a transformed distribution points at its parent; chase the link
    if (!is.null(dlist$dist)){
        if (is.atomic(dlist$dist))
            dlist <- survreg.distributions[[dlist$dist]]
        else
            dlist<-dlist$dist #<TSL>
    }
    if (missing(control)) control <- survreg.control(...)

    if (scale < 0) stop("Invalid scale value")
    if (scale >0 && nstrata >1) 
	    stop("Cannot have multiple strata with a fixed scale")

    # Check for penalized terms
    pterms <- sapply(m, inherits, 'coxph.penalty')
    if (any(pterms)) {
	pattr <- lapply(m[pterms], attributes)
	# 
	# the 'order' attribute has the same components as 'term.labels'
	#   pterms always has 1 more (response), sometimes 2 (offset)
	# drop the extra parts from pterms
	temp <- c(attr(Terms, 'response'), attr(Terms, 'offset'))
	if (length(dropx)) temp <- c(temp, dropx+1)
	pterms <- pterms[-temp]
	temp <- match((names(pterms))[pterms], attr(Terms, 'term.labels'))
	ord <- attr(Terms, 'order')[temp]
	if (any(ord>1)) stop ('Penalty terms cannot be in an interaction')
	##pcols <- (attr(X, 'assign')[-1])[pterms]
        assign<-attrassign(X,newTerms)
        pcols<-assign[-1][pterms]
  
        fit <- survpenal.fit(X, Y, weights, offset, init=init,
				controlvals = control,
			        dist= dlist, scale=scale,
			        strata=strata, nstrat=nstrata,
				pcols, pattr,assign, parms=parms)
	}
    else fit <- survreg.fit(X, Y, weights, offset, 
			    init=init, controlvals=control,
			    dist= dlist, scale=scale, nstrat=nstrata, 
			    strata, parms=parms)

    if (is.character(fit))  fit <- list(fail=fit)  #error message
    else {
	if (scale==0) {
	    # separate the scale estimate(s) from the coefficients
	    nvar <- length(fit$coef) - nstrata
	    fit$scale <- exp(fit$coef[-(1:nvar)])
	    if (nstrata==1) names(fit$scale) <- NULL
	    else names(fit$scale) <- levels(strata.keep)
	    fit$coefficients  <- fit$coefficients[1:nvar]
	    fit$idf  <- 1 + nstrata
	    }
	else {
	    fit$scale <- scale
	    fit$idf  <- 1
	    }
	fit$loglik <- fit$loglik + logcorrect
	}

    na.action <- attr(m, "na.action")
    if (length(na.action)) fit$na.action <- na.action
    fit$df.residual <- n - sum(fit$df)
#   fit$fitted.values <- itrans(fit$linear.predictors)
    fit$terms <- Terms
    fit$formula <- as.vector(attr(Terms, "formula"))
    fit$means <- apply(X,2, mean)
    fit$call <- call
    fit$dist <- dist
    # Bug fix: this was 'fit$df.resid<n-sum(fit$df)', a comparison whose
    #  result was discarded, so df.resid was never actually stored
    fit$df.resid <- n-sum(fit$df) ##used for anova.survreg
    if (model) fit$model <- m
    if (x)     fit$x <- X
    if (y)     fit$y <- Y
    if (length(parms)) fit$parms <- parms
    if (any(pterms)) class(fit)<- c('survreg.penal', 'survreg')
    else	     class(fit) <- 'survreg'
    fit
    }
# SCCS @(#)tcut.s	5.3 11/03/98
# Create a 'tcut' object: a numeric vector that carries its cutpoints and
# interval labels as attributes, for use in time-dependent tabulations.
#   x      numeric vector (dates are coerced to numeric first)
#   breaks either a single count of intervals, or the vector of cutpoints
#   labels optional interval labels (one fewer than the cutpoints)
#   scale  multiplier applied to both the data and the cutpoints
tcut <- function (x, breaks, labels, scale=1){
    # work in plain numeric to avoid some problems with dates
    x <- as.numeric(x)
    breaks <- as.numeric(breaks)

    if (length(breaks) == 1) {
	# a single value is a count of equal-width intervals
	nint <- breaks
	if (nint < 1)
		stop("Must specify at least one interval")
	if (missing(labels))
		labels <- paste("Range", seq(length = nint))
	else if (length(labels) != nint)
		stop("Number of labels must equal number of intervals")
	rng <- range(x[!is.na(x)])
	rng[is.na(rng)] <- 1
	d <- diff(rng)
	if (d == 0) {
	    # all values identical: widen the range artificially
	    rng[2] <- rng[1] + 1
	    d <- 1
	    }
	# pad by 1% on each side so the extreme data values fall inside
	breaks <- seq(rng[1] - 0.01 * d, rng[2] + 0.01 * d, length = nint + 1)
	}
    else {
	ascending <- all(diff(breaks) >= 0)
	if (is.na(ascending) || !ascending)
	   stop("breaks must be given in ascending order and contain no NA's")
	if (missing(labels))
	    labels <- paste0(format(breaks[-length(breaks)]),
			     "+ thru ", format(breaks[-1]))
	else if (length(labels) != length(breaks) - 1)
	   stop("Number of labels must be 1 less than number of break points")
	}

    out <- structure(x*scale, cutpoints=breaks*scale, labels=labels)
    class(out) <- 'tcut'
    out
    }

# Subset method for 'tcut' objects: subscript as a plain vector, then
# restore the original attributes (cutpoints, labels) and class.
"[.tcut" <- function(x, ..., drop=FALSE) {
    saved <- attributes(x)
    out <- unclass(x)[..1]
    attributes(out) <- saved
    class(out) <- 'tcut'
    out
    }
#SCCS @(#)untangle.specials.s	4.3 08/30/98
untangle.specials <- function(tt, special, order=1) {
    # Locate the terms of a terms object that involve a given "special"
    # function (e.g. strata, cluster, tt).
    #   tt      a terms object (must carry a 'specials' attribute)
    #   special name of the special to look for
    #   order   term orders to accept (default 1 = main effects only)
    # Returns list(vars = names of the special variables,
    #              terms = indices of the terms that use them and whose
    #                      order matches 'order').

    spc <- attr(tt, 'specials')[[special]]
    if (length(spc)==0)
	return(list(vars=character(0), terms=numeric(0)))

    # use the exact attribute name rather than relying on partial
    # matching of 'factor' to 'factors'
    facs <- attr(tt, 'factors')
    fname <- dimnames(facs)
    # ff[j] > 0 iff term j involves at least one of the special variables
    ff <- apply(facs[spc,,drop=FALSE], 2, sum)
    # seq_along(ff), not seq(ff): for a model with a single term, seq(ff)
    # expands to 1:ff[1] and could silently return wrong indices
    list(vars= (fname[[1]])[spc],
	     terms= seq_along(ff)[ff & match(attr(tt, 'order'), order, nomatch=0)])
    }

# Provide vcov() methods for coxph and survreg fits when the generic is
# available but the methods have not already been defined elsewhere.
# Both fit objects store the variance matrix in their $var component.
# Use && (scalar, short-circuiting) rather than elementwise & in the
# if() condition.
if (exists("vcov") && !exists("vcov.coxph")) {
    vcov.coxph <- function(object, ...)
        object$var
    vcov.survreg <- function(object, ...)
        object$var
}



 anova.coxph <- function (object, ...,  test = NULL) 
{
    # Sequential analysis-of-deviance table for a Cox model.  Terms are
    # removed one at a time (last to first), each reduced model is refit
    # with update(), and the table reports the change in -2 * log partial
    # likelihood at each step.
    #   object  a coxph fit
    #   ...     optionally further coxph fits; if any are supplied the
    #           comparison is delegated to anova.coxphlist
    #   test    test name passed to stat.anova (e.g. "Chisq")
    if (length(object$rscore) > 0)
        stop("Can't do anova tables with robust variances")
    dotargs <- list(...)
    named <- if (is.null(names(dotargs))) 
        rep(FALSE, length(dotargs))
    else (names(dotargs) != "")
    if (any(named)) 
        warning(paste("The following arguments to anova.coxph(..)", 
            "are invalid and dropped:", paste(deparse(dotargs[named]), 
                collapse = ", ")))
    dotargs <- dotargs[!named]
    # additional coxph fits in ... trigger a model-comparison table
    is.coxmodel <- unlist(lapply(dotargs, function(x) inherits(x, 
        "coxph")))
    dotargs <- dotargs[is.coxmodel]
    if (length(dotargs) > 0) 
        return(anova.coxphlist(c(list(object), dotargs), test = test))
    varlist <- attr(object$terms, "variables")
    termlist <- attr(object$terms, "term.labels")
    resdev <- resdf <- NULL
    form <- ".~."
    fenv <- environment(formula(object))
    nvars <- length(varlist)
    if (nvars > 1) {
        # refit the model with terms dropped one at a time, last first
        for (i in rev(termlist[-1])) {
            form <- paste(form, i, sep = "-")
            fit <- update(object, as.formula(form, env = fenv))
            resdev <- c(resdev, -2*fit$loglik[2])
            resdf <- c(resdf, object$n - sum(!is.na(coef(fit))))
        }
    }
    resdf <- c(object$n, rev(resdf), object$n - sum(!is.na(coef(object))))
    resdev <- c(-2*object$loglik[1], rev(resdev), -2*object$loglik[2])
    table <- data.frame(c(NA, -diff(resdf)), c(NA, pmax(0, -diff(resdev))), 
        resdf, resdev)
    # intercept-only model: keep just the NULL row.  (The old test was
    # 'nvars == 0', which can never be true because varlist always
    # contains at least the response, so a null model crashed at the
    # dimnames assignment below.)
    if (length(termlist) == 0) 
        table <- table[1, , drop = FALSE]
    dimnames(table) <- list(c("NULL", attr(object$terms, "term.labels")), 
        c("Df", "Deviance", "Resid. Df", "Resid. Dev"))
    title <- paste("Analysis of Deviance Table\n Cox model: response is ",
        deparse(object$terms[[2]]),
        "\nTerms added sequentially (first to last)\n", sep = "")
    # the partial likelihood has no dispersion parameter
    df.dispersion <- Inf
    dispersion <- 1
    if (!is.null(test)) 
        # was 'n = NROW(x)': 'x' is not defined in this function, so any
        # call that requested a test errored; use the number of
        # observations from the fit instead
        table <- stat.anova(table = table, test = test, scale = dispersion, 
            df.scale = df.dispersion, n = object$n)
    structure(table, heading = title, class = c("anova", "data.frame"))
}

anova.coxphlist <- function (object, ..., test = NULL) 
{
    # Analysis-of-deviance table comparing a list of (assumed nested)
    # coxph fits on the same data.
    #   object  a list of coxph fits
    #   test    test name passed to stat.anova (e.g. "Chisq")
    # Models whose response differs from that of the first model are
    # dropped with a warning.
    responses <- as.character(lapply(object, function(x) {
        deparse(formula(x)[[2]])
    }))
    sameresp <- responses == responses[1]
    if (!all(sameresp)) {
        object <- object[sameresp]
        warning(paste("Models with response", deparse(responses[!sameresp]), 
            "removed because response differs from", "model 1"))
    }
    ns <- sapply(object, function(x) length(x$residuals))
    if (any(ns != ns[1])) 
        stop("models were not all fitted to the same size of dataset")
    nmodels <- length(object)
    if (nmodels == 1) 
        # was anova.glm(object[[1]], dispersion = dispersion, test = test):
        # 'dispersion' is undefined here and a coxph fit is not a glm fit,
        # so this branch always errored; dispatch to the single-model
        # method instead
        return(anova(object[[1]], test = test))
    resdf <- as.numeric(lapply(object, function(x) x$n - sum(!is.na(coef(x)))))
    resdev <- as.numeric(lapply(object, function(x) -2*x$loglik[2]))
    table <- data.frame(resdf, resdev, c(NA, -diff(resdf)), c(NA, 
        -diff(resdev)))
    variables <- lapply(object, function(x) paste(deparse(formula(x)), 
        collapse = "\n"))
    dimnames(table) <- list(1:nmodels, c("Resid. Df", "Resid. Dev", 
        "Df", "Deviance"))
    title <- "Analysis of Deviance Table\n"
    topnote <- paste("Model ", format(1:nmodels), ": ", variables, 
        sep = "", collapse = "\n")
    if (!is.null(test)) {
        # the fit with the smallest residual df is the largest model
        bigmodel <- object[[order(resdf)[1]]]
        dispersion <- 1       # no dispersion parameter in a Cox model
        df.dispersion <- Inf
        table <- stat.anova(table = table, test = test, scale = dispersion, 
            df.scale = df.dispersion, n = length(bigmodel$residuals))
    }
    structure(table, heading = c(title, topnote), class = c("anova", 
        "data.frame"))
}
