source("mlp.r") trnx = as.matrix(read.table("a2trnx",head=F)) tstx = as.matrix(read.table("a2tstx",head=F)) trny = scan("a2trny") tsty = scan("a2tsty") # Try logistic regression. m = glm (trny ~ trnx, family = "binomial") print(summary(m)) p1.logistic = 1 / (1 + exp(-(coef(m)[1] + tstx %*% coef(m)[2:(ncol(tstx)+1)]))) cat("Error rate with logistic regression:\n"); print(mean(tsty != (p1.logistic>0.5))) val.set = 1:427 est.set = 428:1708 postscript("ass2-plts.ps",horiz=T) par(mfrow=c(2,4)) # Try MLP with varous learning rates. iters = c ( 60000, 30000, 20000, 15000, 10000, 60000, 60000, 60000) eta1 = c (0.000012, 0.000025, 0.00005, 0.0001, 0.0002, 0.000012, 0.000012, 0.000020) eta2 = c (0.000012, 0.000025, 0.00005, 0.0001, 0.0002, 0.000020, 0.000030, 0.000012) for (i in 1:length(iters)) { set.seed(123) params = mlp.cv (trny, trnx, eta1[i], eta2[i], val.set, iters=iters[i], q=15, cv.plot=T) title(paste("eta1 =",eta1[i]," eta2 =",eta2[i])) p1.mlp = mlp.predict(tstx,params) cat("Error rate with mlp using eta1 =",eta1[i],"and eta2 =",eta2[i],":\n") print(mean(tsty != (p1.mlp>0.5))) }