{"id":274,"date":"2013-07-07T13:06:37","date_gmt":"2013-07-07T09:36:37","guid":{"rendered":"http:\/\/vua.nadiran.com\/?p=274"},"modified":"2013-07-10T14:52:45","modified_gmt":"2013-07-10T11:22:45","slug":"%d9%86%d9%85%d9%88%d9%86%d9%87-%d9%87%d8%a7%db%8c-%d8%a8%d8%b1%d9%86%d8%a7%d9%85%d9%87-lasso-%d8%af%d8%b1-%d9%be%da%a9%db%8c%da%86-r","status":"publish","type":"post","link":"https:\/\/vua.nadiran.com\/?p=274","title":{"rendered":"\u0646\u0645\u0648\u0646\u0647 \u0647\u0627\u06cc \u0628\u0631\u0646\u0627\u0645\u0647 \u0631\u06af\u0631\u0633\u06cc\u0648\u0646 \u062f\u0631 \u067e\u06a9\u06cc\u0686 R"},"content":{"rendered":"<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">## Practical session: Linear regression<\/p>\n<p style=\"direction: ltr;\">## Jean-Philippe.Vert@mines.org<\/p>\n<p style=\"direction: ltr;\">##<\/p>\n<p style=\"direction: ltr;\">## In this practical session to test several linear regression methods on the prostate cancer dataset<\/p>\n<p style=\"direction: ltr;\">##<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Prepare data<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\"># Download prostate data<\/p>\n<p style=\"direction: ltr;\">con = url (&#8220;http:\/\/www-stat.stanford.edu\/~tibs\/ElemStatLearn\/datasets\/prostate.data&#8221;)<\/p>\n<p style=\"direction: ltr;\">prost=read.csv(con,row.names=1,sep=&#8221;\\t&#8221;)<\/p>\n<p style=\"direction: ltr;\"># Alternatively, load the file and read from local file as follows<\/p>\n<p style=\"direction: ltr;\"># prost=read.csv(&#8216;prostate.data.txt&#8217;,row.names=1,sep=&#8221;\\t&#8221;)<\/p>\n<p style=\"direction: ltr;\"># Scale data and prepare train\/test split<\/p>\n<p style=\"direction: ltr;\">prost.std &lt;- data.frame(cbind(scale(prost[,1:8]),prost$lpsa))<\/p>\n<p style=\"direction: ltr;\">names(prost.std)[9] &lt;- &#8216;lpsa&#8217;<\/p>\n<p 
style=\"direction: ltr;\">data.train &lt;- prost.std[prost$train,]<\/p>\n<p style=\"direction: ltr;\">data.test &lt;- prost.std[!prost$train,]<\/p>\n<p style=\"direction: ltr;\">y.test &lt;- data.test$lpsa<\/p>\n<p style=\"direction: ltr;\">n.train &lt;- nrow(data.train)<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Ordinary least squares<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">m.ols &lt;- lm(lpsa ~ . , data=data.train)<\/p>\n<p style=\"direction: ltr;\">summary(m.ols)<\/p>\n<p style=\"direction: ltr;\">y.pred.ols &lt;- predict(m.ols,data.test)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.ols &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\"># the importance of a feature depends on other features. Compare the following. Explain (visualize).<\/p>\n<p style=\"direction: ltr;\">summary(lm(lpsa~svi,data=data.train))<\/p>\n<p style=\"direction: ltr;\">summary(lm(lpsa~svi+lcavol,data=data.train))<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Best subset selection<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(leaps)<\/p>\n<p style=\"direction: ltr;\">l &lt;- leaps(data.train[,1:8],data.train[,9],method=&#8217;r2&#8242;)<\/p>\n<p style=\"direction: ltr;\">plot(l$size,l$r2)<\/p>\n<p style=\"direction: ltr;\">l &lt;- leaps(data.train[,1:8],data.train[,9],method=&#8217;Cp&#8217;)<\/p>\n<p style=\"direction: ltr;\">plot(l$size,l$Cp)<\/p>\n<p style=\"direction: ltr;\"># Select best model according to Cp<\/p>\n<p style=\"direction: ltr;\">bestfeat &lt;- l$which[which.min(l$Cp),]<\/p>\n<p style=\"direction: ltr;\"># Train and test the model on the best subset<\/p>\n<p style=\"direction: ltr;\">m.bestsubset &lt;- lm(lpsa ~ 
.,data=data.train[,bestfeat])<\/p>\n<p style=\"direction: ltr;\">summary(m.bestsubset)<\/p>\n<p style=\"direction: ltr;\">y.pred.bestsubset &lt;- predict(m.bestsubset,data.test[,bestfeat])<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.bestsubset &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Greedy subset selection<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(MASS)<\/p>\n<p style=\"direction: ltr;\"># Forward selection<\/p>\n<p style=\"direction: ltr;\">m.init &lt;- lm(lpsa ~ 1 , data=data.train)<\/p>\n<p style=\"direction: ltr;\">m.forward &lt;- stepAIC(m.init, scope=list(upper=~lcavol+lweight+age+lbph+svi+lcp+gleason+pgg45,lower=~1) , direction=&#8221;forward&#8221;)<\/p>\n<p style=\"direction: ltr;\">y.pred.forward &lt;- predict(m.forward,data.test)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.forward &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\"># Backward selection<\/p>\n<p style=\"direction: ltr;\">m.init &lt;- lm(lpsa ~ . 
, data=data.train)<\/p>\n<p style=\"direction: ltr;\">m.backward &lt;- stepAIC(m.init , direction=&#8221;backward&#8221;)<\/p>\n<p style=\"direction: ltr;\">y.pred.backward &lt;- predict(m.backward,data.test)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.backward &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\"># Hybrid selection<\/p>\n<p style=\"direction: ltr;\">m.init &lt;- lm(lpsa ~ 1 , data=data.train)<\/p>\n<p style=\"direction: ltr;\">m.hybrid &lt;- stepAIC(m.init, scope=list(upper=~lcavol+lweight+age+lbph+svi+lcp+gleason+pgg45,lower=~1) , direction=&#8221;both&#8221;)<\/p>\n<p style=\"direction: ltr;\">y.pred.hybrid &lt;- predict(m.hybrid,data.test)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.hybrid &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Ridge regression<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(MASS)<\/p>\n<p style=\"direction: ltr;\">m.ridge &lt;- lm.ridge(lpsa ~ .,data=data.train, lambda = seq(0,20,0.1))<\/p>\n<p style=\"direction: ltr;\">plot(m.ridge)<\/p>\n<p style=\"direction: ltr;\"># select parameter by minimum GCV<\/p>\n<p style=\"direction: ltr;\">plot(m.ridge$GCV)<\/p>\n<p style=\"direction: ltr;\"># Predict is not implemented so we need to do it ourselves<\/p>\n<p style=\"direction: ltr;\">y.pred.ridge = scale(data.test[,1:8],center = F, scale = m.ridge$scales)%*% m.ridge$coef[,which.min(m.ridge$GCV)] + m.ridge$ym<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.ridge &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## Lasso<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(lars)<\/p>\n<p style=\"direction: ltr;\">m.lasso &lt;- lars(as.matrix(data.train[,1:8]),data.train$lpsa)<\/p>\n<p style=\"direction: 
ltr;\">plot(m.lasso)<\/p>\n<p style=\"direction: ltr;\"># Cross-validation<\/p>\n<p style=\"direction: ltr;\">r &lt;- cv.lars(as.matrix(data.train[,1:8]),data.train$lpsa)<\/p>\n<p style=\"direction: ltr;\">bestfraction &lt;- r$fraction[which.min(r$cv)]<\/p>\n<p style=\"direction: ltr;\">##### Note 5\/8\/11: in the new lars package 0.9-8, the field r$fraction seems to have been replaced by r$index. The previous line should therefore be replaced by:<\/p>\n<p style=\"direction: ltr;\"># bestfraction &lt;- r$index[which.min(r$cv)]<\/p>\n<p style=\"direction: ltr;\"># Observe coefficients<\/p>\n<p style=\"direction: ltr;\">coef.lasso &lt;- predict(m.lasso,as.matrix(data.test[,1:8]),s=bestfraction,type=&#8221;coefficient&#8221;,mode=&#8221;fraction&#8221;)<\/p>\n<p style=\"direction: ltr;\">coef.lasso<\/p>\n<p style=\"direction: ltr;\"># Prediction<\/p>\n<p style=\"direction: ltr;\">y.pred.lasso &lt;- predict(m.lasso,as.matrix(data.test[,1:8]),s=bestfraction,type=&#8221;fit&#8221;,mode=&#8221;fraction&#8221;)$fit<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.lasso &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## PCR<\/p>\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(pls)<\/p>\n<p style=\"direction: ltr;\">m.pcr &lt;- pcr(lpsa ~ .,data=data.train , validation=&#8221;CV&#8221;)<\/p>\n<p style=\"direction: ltr;\"># select number of components (by CV)<\/p>\n<p style=\"direction: ltr;\">ncomp &lt;- which.min(m.pcr$validation$adj)<\/p>\n<p style=\"direction: ltr;\"># predict<\/p>\n<p style=\"direction: ltr;\">y.pred.pcr &lt;- predict(m.pcr,data.test , ncomp=ncomp)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.pcr &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">## PLS<\/p>\n<p 
style=\"direction: ltr;\">####################################<\/p>\n<p style=\"direction: ltr;\">library(pls)<\/p>\n<p style=\"direction: ltr;\">m.pls &lt;- plsr(lpsa ~ .,data=data.train , validation=&#8221;CV&#8221;)<\/p>\n<p style=\"direction: ltr;\"># select number of components (by CV)<\/p>\n<p style=\"direction: ltr;\">ncomp &lt;- which.min(m.pls$validation$adj)<\/p>\n<p style=\"direction: ltr;\"># predict<\/p>\n<p style=\"direction: ltr;\">y.pred.pls &lt;- predict(m.pls,data.test , ncomp=ncomp)<\/p>\n<p style=\"direction: ltr;\">summary((y.pred.pls &#8211; y.test)^2)<\/p>\n<p style=\"direction: ltr;\">\n<pre style=\"direction: ltr;\">\r\n\r\n\r\n\r\n<\/pre>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%<\/p>\n<p style=\"direction: ltr;\">%<\/p>\n<p style=\"direction: ltr;\">% File: Lasso.m<\/p>\n<p style=\"direction: ltr;\">% Author: Jinzhu Jia<\/p>\n<p style=\"direction: ltr;\">%<\/p>\n<p style=\"direction: ltr;\">% One simple (Gradient Descent) implementation of the Lasso<\/p>\n<p style=\"direction: ltr;\">% The objective function is:<\/p>\n<p style=\"direction: ltr;\">% \u06f1\/\u06f2*sum((Y &#8211; X * beta -beta0).^2) + lambda * sum(abs(beta))<\/p>\n<p style=\"direction: ltr;\">% Comparison with CVX code is done<\/p>\n<p style=\"direction: ltr;\">% Reference: To be uploaded<\/p>\n<p style=\"direction: ltr;\">%<\/p>\n<p style=\"direction: ltr;\">% Version 4.23.2010<\/p>\n<p style=\"direction: ltr;\">%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%<\/p>\n<p style=\"direction: ltr;\">function [beta0,beta] = Lasso(X,Y,lambda)<\/p>\n<p style=\"direction: ltr;\">n = size(X,1);<\/p>\n<p style=\"direction: ltr;\">p = size(X,2);<\/p>\n<p style=\"direction: ltr;\">if(size(Y,1) ~= n)<\/p>\n<p style=\"direction: ltr;\">fprintf( &#8216;Error: dim of X and dim of Y are not match!!\\n&#8217;)<\/p>\n<p style=\"direction: ltr;\">quit cancel<\/p>\n<p style=\"direction: ltr;\">end<\/p>\n<p style=\"direction: 
ltr;\">beta0 = 0;<\/p>\n<p style=\"direction: ltr;\">beta= zeros(p,1);<\/p>\n<p style=\"direction: ltr;\">crit = 1;<\/p>\n<p style=\"direction: ltr;\">step = 0;<\/p>\n<p style=\"direction: ltr;\">while(crit &gt; 1E-5)<\/p>\n<p style=\"direction: ltr;\">step = step + 1;<\/p>\n<p style=\"direction: ltr;\">obj_ini = 1\/2*sum((Y &#8211; X * beta -beta0).^2) + lambda * sum(abs(beta));<\/p>\n<p style=\"direction: ltr;\">beta0 = mean(Y &#8211; X*beta);<\/p>\n<p style=\"direction: ltr;\">for(j = 1:p)<\/p>\n<p style=\"direction: ltr;\">a = sum(X(:,j).^2);<\/p>\n<p style=\"direction: ltr;\">b = sum((Y &#8211; X*beta).* X(:,j)) + beta(j) * a;<\/p>\n<p style=\"direction: ltr;\">if lambda &gt;= abs(b)<\/p>\n<p style=\"direction: ltr;\">beta(j) = 0;<\/p>\n<p style=\"direction: ltr;\">else<\/p>\n<p style=\"direction: ltr;\">if b&gt;0<\/p>\n<p style=\"direction: ltr;\">beta(j) = (b &#8211; lambda) \/ a;<\/p>\n<p style=\"direction: ltr;\">else<\/p>\n<p style=\"direction: ltr;\">beta(j) = (b+lambda) \/a;<\/p>\n<p style=\"direction: ltr;\">end<\/p>\n<p style=\"direction: ltr;\">end<\/p>\n<p style=\"direction: ltr;\">end<\/p>\n<p style=\"direction: ltr;\">obj_now = 1\/2*sum((Y &#8211; X * beta -beta0).^2) + lambda * sum(abs(beta));<\/p>\n<p style=\"direction: ltr;\">crit = abs(obj_now &#8211; obj_ini) \/ abs(obj_ini);<\/p>\n<p style=\"direction: ltr;\">end<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\">\u0645\u062b\u0627\u0644 \u062e\u0631\u0648\u062c\u06cc :<\/p>\n<p style=\"direction: ltr;\">\n<p style=\"direction: ltr;\"><a href=\"http:\/\/vua.nadiran.com\/wp-content\/uploads\/example-regress-matlab.png\"><img loading=\"lazy\" decoding=\"async\" class=\"aligncenter size-full wp-image-278\" alt=\"example-regress-matlab\" src=\"http:\/\/vua.nadiran.com\/wp-content\/uploads\/example-regress-matlab.png\" width=\"471\" height=\"342\" srcset=\"https:\/\/vua.nadiran.com\/wp-content\/uploads\/example-regress-matlab.png 471w, 
https:\/\/vua.nadiran.com\/wp-content\/uploads\/example-regress-matlab-300x217.png 300w\" sizes=\"(max-width: 471px) 100vw, 471px\" \/><\/a><\/p>\n","protected":false},"excerpt":{"rendered":"<p>## Practical session: Linear regression ## Jean-Philippe.Vert@mines.org ## ## In this practical session to test several linear regression methods on the prostate cancer dataset ## #################################### ## Prepare data #################################### # Download prostate data con = url (&#8220;http:\/\/www-stat.stanford.edu\/~tibs\/ElemStatLearn\/datasets\/prostate.data&#8221;) prost=read.csv(con,row.names=1,sep=&#8221;\\t&#8221;) # Alternatively, load the file and read from local file as follows # prost=read.csv(&#8216;prostate.data.txt&#8217;,row.names=1,sep=&#8221;\\t&#8221;) # Scale <a href='https:\/\/vua.nadiran.com\/?p=274' class='excerpt-more'>[&#8230;]<\/a><\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[19],"tags":[],"class_list":["post-274","post","type-post","status-publish","format-standard","hentry","category-19","category-19-id","post-seq-1","post-parity-odd","meta-position-corners","fix"],"_links":{"self":[{"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/posts\/274"}],"collection":[{"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=274"}],"version-history":[{"count":4,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/posts\/274\/revisions"}],"predecessor-version":[{"id":276,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=\/wp\/v2\/pos
ts\/274\/revisions\/276"}],"wp:attachment":[{"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=274"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=274"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/vua.nadiran.com\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=274"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}