Diffstat (limited to 'R_LinR')
20 files changed, 909 insertions, 4 deletions
diff --git a/R_LinR/Rplots.pdf b/R_LinR/Rplots.pdf
Binary files differ
index 778337e..c96086d 100644
--- a/R_LinR/Rplots.pdf
+++ b/R_LinR/Rplots.pdf
diff --git a/R_LinR/aaaaaa.md b/R_LinR/aaaaaa.md
new file mode 100644
index 0000000..8ad7cd1
--- /dev/null
+++ b/R_LinR/aaaaaa.md
@@ -0,0 +1,23 @@
+t-value = estimate / std_error
+
+R^2 = 1 - SS_{res}/SS_{tot}
+
+SS_{res} = sum((y_i - y_pred)^2)  # residual sum of squares
+SS_{tot} = sum((y_i - y_mean)^2)  # total sum of squares
+
+
+F = ((SS_{tot} - SS_{res})/p) / (SS_{res}/(n-p-1))
+n - number of data points
+p - number of predictors
+
+# https://www.rose-hulman.edu/class/ma/inlow/Math485/ftests.pdf
+
+
+
+p-value -> 0 indicates high significance
+
+adjusted R^2 = 1 - (1-R^2)(n-1)/(n-p-1) - adjusts R^2 by penalizing the inclusion of predictors that add little explanatory power; it takes into account both the number of predictors and the sample size.
+
+
+
+https://mjt.cs.illinois.edu/ml/lec2.pdf
\ No newline at end of file
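All of these quantities can be recomputed by hand from an lm fit and checked against what summary() reports. A minimal sketch in R, using a synthetic data frame invented purely for illustration (none of these names come from the repo):

set.seed(42)
n <- 100   # number of data points
p <- 2     # number of predictors
d <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
d$y <- 1 + 2 * d$x1 - 0.5 * d$x2 + rnorm(n)   # made-up response
fit <- lm(y ~ x1 + x2, data = d)

ss_res <- sum(residuals(fit)^2)      # SS_res = sum((y_i - y_pred)^2)
ss_tot <- sum((d$y - mean(d$y))^2)   # SS_tot = sum((y_i - y_mean)^2)

r2     <- 1 - ss_res / ss_tot
adj_r2 <- 1 - (1 - r2) * (n - 1) / (n - p - 1)
f_stat <- ((ss_tot - ss_res) / p) / (ss_res / (n - p - 1))

# t-values are estimate / std_error, column 3 of the coefficient table
t_vals <- coef(summary(fit))[, "Estimate"] / coef(summary(fit))[, "Std. Error"]

These should agree with summary(fit)$r.squared, summary(fit)$adj.r.squared, summary(fit)$fstatistic[1], and coef(summary(fit))[, "t value"], up to rounding.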
diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1.zip b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1.zip
Binary files differ
new file mode 100644
index 0000000..0172afd
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1.zip
diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/ex1.pdf b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/ex1.pdf
Binary files differ
new file mode 100644
index 0000000..f07bfe8
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/ex1.pdf
diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCost.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCost.m
new file mode 100644
index 0000000..3dc1543
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCost.m
@@ -0,0 +1,23 @@
+function J = computeCost(X, y, theta)
+%COMPUTECOST Compute cost for linear regression
+%   J = COMPUTECOST(X, y, theta) computes the cost of using theta as the
+%   parameter for linear regression to fit the data points in X and y
+
+% Initialize some useful values
+m = length(y); % number of training examples
+
+% You need to return the following variables correctly
+J = 0;
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the cost of a particular choice of theta
+%               You should set J to the cost.
+
+% https://www.kaggle.com/discussions/general/156593
+% the .^ operator squares the residuals element by element
+J = 1 / (2 * m) * sum((X * theta - y) .^ 2);
+
+
+% =========================================================================
+
+end
diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCostMulti.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCostMulti.m
new file mode 100644
index 0000000..3dc9b06
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/computeCostMulti.m
@@ -0,0 +1,22 @@
+function J = computeCostMulti(X, y, theta)
+%COMPUTECOSTMULTI Compute cost for linear regression with multiple variables
+%   J = COMPUTECOSTMULTI(X, y, theta) computes the cost of using theta as the
+%   parameter for linear regression to fit the data points in X and y
+
+% Initialize some useful values
+m = length(y); % number of training examples
+
+% You need to return the following variables correctly
+J = 0;
+
+% ====================== YOUR CODE HERE ======================
+% Instructions: Compute the cost of a particular choice of theta
+%               You should set J to the cost.
+J = 1 / (2 * m) * sum((X * theta - y) .^ 2);
+
+
+
+
+% =========================================================================
+
+end
diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1.m
new file mode 100644
index 0000000..f3aacb5
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1.m
@@ -0,0 +1,122 @@
+%% Machine Learning Online Class - Exercise 1: Linear Regression
+
+%  Instructions
+%  ------------
+%
+%  This file contains code that helps you get started on the
+%  linear exercise.
You will need to complete the following functions +% in this exericse: +% +% warmUpExercise.m +% plotData.m +% gradientDescent.m +% computeCost.m +% gradientDescentMulti.m +% computeCostMulti.m +% featureNormalize.m +% normalEqn.m +% +% For this exercise, you will not need to change any code in this file, +% or any other files other than those mentioned above. +% +% x refers to the population size in 10,000s +% y refers to the profit in $10,000s +% + +%% Initialization +clear all; close all; clc + +%% ==================== Part 1: Basic Function ==================== +% Complete warmUpExercise.m +fprintf('Running warmUpExercise ... \n'); +fprintf('5x5 Identity Matrix: \n'); +warmUpExercise() + +fprintf('Program paused. Press enter to continue.\n'); +pause; + + +%% ======================= Part 2: Plotting ======================= +fprintf('Plotting Data ...\n') +data = load('ex1data1.txt'); +X = data(:, 1); y = data(:, 2); +m = length(y); % number of training examples + +% Plot Data +% Note: You have to complete the code in plotData.m +plotData(X, y); + +fprintf('Program paused. Press enter to continue.\n'); +pause; + +%% =================== Part 3: Gradient descent =================== +fprintf('Running Gradient Descent ...\n') + +X = [ones(m, 1), data(:,1)]; % Add a column of ones to x +theta = zeros(2, 1); % initialize fitting parameters + +% Some gradient descent settings +iterations = 1500; +alpha = 0.01; + +% compute and display initial cost +computeCost(X, y, theta) + +% run gradient descent +theta = gradientDescent(X, y, theta, alpha, iterations); + +% print theta to screen +fprintf('Theta found by gradient descent: '); +fprintf('%f %f \n', theta(1), theta(2)); + +% Plot the linear fit +hold on; % keep previous plot visible +plot(X(:,2), X*theta, '-') +legend('Training data', 'Linear regression') +hold off % don't overlay any more plots on this figure + +% Predict values for population sizes of 35,000 and 70,000 +predict1 = [1, 3.5] *theta; +fprintf('For population = 35,000, we predict a profit of %f\n',... + predict1*10000); +predict2 = [1, 7] * theta; +fprintf('For population = 70,000, we predict a profit of %f\n',... + predict2*10000); + +fprintf('Program paused. 
Press enter to continue.\n'); +pause; + +%% ============= Part 4: Visualizing J(theta_0, theta_1) ============= +fprintf('Visualizing J(theta_0, theta_1) ...\n') + +% Grid over which we will calculate J +theta0_vals = linspace(-10, 10, 100); +theta1_vals = linspace(-1, 4, 100); + +% initialize J_vals to a matrix of 0's +J_vals = zeros(length(theta0_vals), length(theta1_vals)); + +% Fill out J_vals +for i = 1:length(theta0_vals) + for j = 1:length(theta1_vals) + t = [theta0_vals(i); theta1_vals(j)]; + J_vals(i,j) = computeCost(X, y, t); + end +end + + +% Because of the way meshgrids work in the surf command, we need to +% transpose J_vals before calling surf, or else the axes will be flipped +J_vals = J_vals'; +% Surface plot +figure; +surf(theta0_vals, theta1_vals, J_vals) +xlabel('\theta_0'); ylabel('\theta_1'); + +% Contour plot +figure; +% Plot J_vals as 15 contours spaced logarithmically between 0.01 and 100 +contour(theta0_vals, theta1_vals, J_vals, logspace(-2, 3, 20)) +xlabel('\theta_0'); ylabel('\theta_1'); +hold on; +plot(theta(1), theta(2), 'rx', 'MarkerSize', 10, 'LineWidth', 2); diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1_multi.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1_multi.m new file mode 100644 index 0000000..7a8ceab --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/ex1_multi.m @@ -0,0 +1,160 @@ +%% Machine Learning Online Class +% Exercise 1: Linear regression with multiple variables +% +% Instructions +% ------------ +% +% This file contains code that helps you get started on the +% linear regression exercise. +% +% You will need to complete the following functions in this +% exericse: +% +% warmUpExercise.m +% plotData.m +% gradientDescent.m +% computeCost.m +% gradientDescentMulti.m +% computeCostMulti.m +% featureNormalize.m +% normalEqn.m +% +% For this part of the exercise, you will need to change some +% parts of the code below for various experiments (e.g., changing +% learning rates). +% + +%% Initialization + +%% ================ Part 1: Feature Normalization ================ + +%% Clear and Close Figures +clear all; close all; clc + +fprintf('Loading data ...\n'); + +%% Load Data +data = load('ex1data2.txt'); +X = data(:, 1:2); +y = data(:, 3); +m = length(y); + +% Print out some data points +fprintf('First 10 examples from the dataset: \n'); +fprintf(' x = [%.0f %.0f], y = %.0f \n', [X(1:10,:) y(1:10,:)]'); + +fprintf('Program paused. Press enter to continue.\n'); +pause; + +% Scale features and set them to zero mean +fprintf('Normalizing Features ...\n'); + +[X mu sigma] = featureNormalize(X); + +% Add intercept term to X +X = [ones(m, 1) X]; + + +%% ================ Part 2: Gradient Descent ================ + +% ====================== YOUR CODE HERE ====================== +% Instructions: We have provided you with the following starter +% code that runs gradient descent with a particular +% learning rate (alpha). +% +% Your task is to first make sure that your functions - +% computeCost and gradientDescent already work with +% this starter code and support multiple variables. +% +% After that, try running gradient descent with +% different values of alpha and see which one gives +% you the best result. +% +% Finally, you should complete the code at the end +% to predict the price of a 1650 sq-ft, 3 br house. +% +% Hint: By using the 'hold on' command, you can plot multiple +% graphs on the same figure. 
+% +% Hint: At prediction, make sure you do the same feature normalization. +% + +fprintf('Running gradient descent ...\n'); + +% Choose some alpha value +alpha = 0.01; +num_iters = 100; + +% Init Theta and Run Gradient Descent +theta = zeros(3, 1); +[theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters); + +% Plot the convergence graph +figure; +plot(1:numel(J_history), J_history, '-b', 'LineWidth', 2); +xlabel('Number of iterations'); +ylabel('Cost J'); + +% Display gradient descent's result +fprintf('Theta computed from gradient descent: \n'); +fprintf(' %f \n', theta); +fprintf('\n'); + +% Estimate the price of a 1650 sq-ft, 3 br house +% ====================== YOUR CODE HERE ====================== +% Recall that the first column of X is all-ones. Thus, it does +% not need to be normalized. +price = 0; % You should change this + + +% ============================================================ + +fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ... + '(using gradient descent):\n $%f\n'], price); + +fprintf('Program paused. Press enter to continue.\n'); +pause; + +%% ================ Part 3: Normal Equations ================ + +fprintf('Solving with normal equations...\n'); + +% ====================== YOUR CODE HERE ====================== +% Instructions: The following code computes the closed form +% solution for linear regression using the normal +% equations. You should complete the code in +% normalEqn.m +% +% After doing so, you should complete this code +% to predict the price of a 1650 sq-ft, 3 br house. +% + +%% Load Data +data = csvread('ex1data2.txt'); +X = data(:, 1:2); +y = data(:, 3); +m = length(y); + +% Add intercept term to X +X = [ones(m, 1) X]; + +% Calculate the parameters from the normal equation +theta = normalEqn(X, y); + +% Display normal equation's result +fprintf('Theta computed from the normal equations: \n'); +fprintf(' %f \n', theta); +fprintf('\n'); + + +% Estimate the price of a 1650 sq-ft, 3 br house +% ====================== YOUR CODE HERE ====================== +area = 1650; % You should change this +br = 3; +price = [1, area, br] * theta; + +% ============================================================ + +fprintf(['Predicted price of a 1650 sq-ft, 3 br house ' ... + '(using normal equations):\n $%f\n'], price); + diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/featureNormalize.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/featureNormalize.m new file mode 100644 index 0000000..ea5c7d2 --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/featureNormalize.m @@ -0,0 +1,41 @@ +function [X_norm, mu, sigma] = featureNormalize(X) +%FEATURENORMALIZE Normalizes the features in X +% FEATURENORMALIZE(X) returns a normalized version of X where +% the mean value of each feature is 0 and the standard deviation +% is 1. This is often a good preprocessing step to do when +% working with learning algorithms. + +% You need to set these values correctly +% X_norm = X; +mu = zeros(1, size(X, 2)); +sigma = zeros(1, size(X, 2)); + +% ====================== YOUR CODE HERE ====================== +% Instructions: First, for each feature dimension, compute the mean +% of the feature and subtract it from the dataset, +% storing the mean value in mu. Next, compute the +% standard deviation of each feature and divide +% each feature by it's standard deviation, storing +% the standard deviation in sigma. 
+% +% Note that X is a matrix where each column is a +% feature and each row is an example. You need +% to perform the normalization separately for +% each feature. +% +% Hint: You might find the 'mean' and 'std' functions useful. +% + + + +mu = mean(X); +sigma = std(X); +X_norm = (X - mu) ./ sigma; + + + + + +% ============================================================ + +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescent.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescent.m new file mode 100644 index 0000000..b74dd2c --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescent.m @@ -0,0 +1,34 @@ +function [theta, J_history] = gradientDescent(X, y, theta, alpha, num_iters) +%GRADIENTDESCENT Performs gradient descent to learn theta +% theta = GRADIENTDESENT(X, y, theta, alpha, num_iters) updates theta by +% taking num_iters gradient steps with learning rate alpha + +% Initialize some useful values +m = length(y); % number of training examples +J_history = zeros(num_iters, 1); + +for iter = 1:num_iters + + % ====================== YOUR CODE HERE ====================== + % Instructions: Perform a single gradient step on the parameter vector + % theta. + % + % Hint: While debugging, it can be useful to print out the values + % of the cost function (computeCost) and gradient here. + % + + +# X' - transpose +# https://mjt.cs.illinois.edu/ml/lec2.pdf +theta = theta - alpha / m * (X' * (X * theta - y)); + + + + % ============================================================ + + % Save the cost J in every iteration + J_history(iter) = computeCost(X, y, theta); + +end + +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescentMulti.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescentMulti.m new file mode 100644 index 0000000..99a10aa --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/gradientDescentMulti.m @@ -0,0 +1,37 @@ +function [theta, J_history] = gradientDescentMulti(X, y, theta, alpha, num_iters) +%GRADIENTDESCENTMULTI Performs gradient descent to learn theta +% theta = GRADIENTDESCENTMULTI(x, y, theta, alpha, num_iters) updates theta by +% taking num_iters gradient steps with learning rate alpha + +% Initialize some useful values +m = length(y); % number of training examples +J_history = zeros(num_iters, 1); + +for iter = 1:num_iters + + % ====================== YOUR CODE HERE ====================== + % Instructions: Perform a single gradient step on the parameter vector + % theta. + % + % Hint: While debugging, it can be useful to print out the values + % of the cost function (computeCostMulti) and gradient here. 
+ % + + + + + +theta = theta - alpha / m * (X' * (X * theta - y)); + + + + + + % ============================================================ + + % Save the cost J in every iteration + J_history(iter) = computeCostMulti(X, y, theta); + +end + +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/normalEqn.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/normalEqn.m new file mode 100644 index 0000000..56984a3 --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/normalEqn.m @@ -0,0 +1,25 @@ +function [theta] = normalEqn(X, y) +%NORMALEQN Computes the closed-form solution to linear regression +% NORMALEQN(X,y) computes the closed-form solution to linear +% regression using the normal equations. + +theta = zeros(size(X, 2), 1); + +% ====================== YOUR CODE HERE ====================== +% Instructions: Complete the code to compute the closed form solution +% to linear regression and put the result in theta. +% + +% ---------------------- Sample Solution ---------------------- + +theta = pinv(X' * X) * X' * y; + + + + +% ------------------------------------------------------------- + + +% ============================================================ + +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/octave-workspace b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/octave-workspace Binary files differnew file mode 100644 index 0000000..787b037 --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/octave-workspace diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/plotData.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/plotData.m new file mode 100644 index 0000000..ba825d0 --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/plotData.m @@ -0,0 +1,27 @@ +function plotData(x, y) +%PLOTDATA Plots the data points x and y into a new figure +% PLOTDATA(x,y) plots the data points and gives the figure axes labels of +% population and profit. + +% ====================== YOUR CODE HERE ====================== +% Instructions: Plot the training data into a figure using the +% "figure" and "plot" commands. Set the axes labels using +% the "xlabel" and "ylabel" commands. Assume the +% population and revenue data have been passed in +% as the x and y arguments of this function. +% +% Hint: You can use the 'rx' option with plot to have the markers +% appear as red crosses. 
Furthermore, you can make the +% markers larger by using plot(..., 'rx', 'MarkerSize', 10); + +figure; % open a new figure window + + +plot(x, y, 'rx', 'MarkerSize', 10); +xlabel('population'); +ylabel('profit'); + + +% ============================================================ + +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/submit.m b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/submit.m new file mode 100644 index 0000000..48f3e71 --- /dev/null +++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/submit.m @@ -0,0 +1,336 @@ +function submit(part) +%SUBMIT Submit your code and output to the ml-class servers +% SUBMIT() will connect to the ml-class server and submit your solution + + fprintf('==\n== [ml-class] Submitting Solutions | Programming Exercise %s\n==\n', ... + homework_id()); + if ~exist('part', 'var') || isempty(part) + partId = promptPart(); + end + + % Check valid partId + partNames = validParts(); + if ~isValidPartId(partId) + fprintf('!! Invalid homework part selected.\n'); + fprintf('!! Expected an integer from 1 to %d.\n', numel(partNames) + 1); + fprintf('!! Submission Cancelled\n'); + return + end + + [login password] = loginPrompt(); + if isempty(login) + fprintf('!! Submission Cancelled\n'); + return + end + + fprintf('\n== Connecting to ml-class ... '); + if exist('OCTAVE_VERSION') + fflush(stdout); + end + + % Setup submit list + if partId == numel(partNames) + 1 + submitParts = 1:numel(partNames); + else + submitParts = [partId]; + end + + for s = 1:numel(submitParts) + % Submit this part + partId = submitParts(s); + + % Get Challenge + [login, ch, signature] = getChallenge(login); + if isempty(login) || isempty(ch) || isempty(signature) + % Some error occured, error string in first return element. + fprintf('\n!! Error: %s\n\n', login); + return + end + + % Attempt Submission with Challenge + ch_resp = challengeResponse(login, password, ch); + [result, str] = submitSolution(login, ch_resp, partId, output(partId), ... + source(partId), signature); + + fprintf('\n== [ml-class] Submitted Homework %s - Part %d - %s\n', ... + homework_id(), partId, partNames{partId}); + fprintf('== %s\n', strtrim(str)); + if exist('OCTAVE_VERSION') + fflush(stdout); + end + end + +end + +% ================== CONFIGURABLES FOR EACH HOMEWORK ================== + +function id = homework_id() + id = '1'; +end + +function [partNames] = validParts() + partNames = { 'Warm up exercise ', ... + 'Computing Cost (for one variable)', ... + 'Gradient Descent (for one variable)', ... + 'Feature Normalization', ... + 'Computing Cost (for multiple variables)', ... + 'Gradient Descent (for multiple variables)', ... + 'Normal Equations'}; +end + +function srcs = sources() + % Separated by part + srcs = { { 'warmUpExercise.m' }, ... + { 'computeCost.m' }, ... + { 'gradientDescent.m' }, ... + { 'featureNormalize.m' }, ... + { 'computeCostMulti.m' }, ... + { 'gradientDescentMulti.m' }, ... + { 'normalEqn.m' }, ... 
+ }; +end + +function out = output(partId) + % Random Test Cases + X1 = [ones(20,1) (exp(1) + exp(2) * (0.1:0.1:2))']; + Y1 = X1(:,2) + sin(X1(:,1)) + cos(X1(:,2)); + X2 = [X1 X1(:,2).^0.5 X1(:,2).^0.25]; + Y2 = Y1.^0.5 + Y1; + if partId == 1 + out = sprintf('%0.5f ', warmUpExercise()); + elseif partId == 2 + out = sprintf('%0.5f ', computeCost(X1, Y1, [0.5 -0.5]')); + elseif partId == 3 + out = sprintf('%0.5f ', gradientDescent(X1, Y1, [0.5 -0.5]', 0.01, 10)); + elseif partId == 4 + out = sprintf('%0.5f ', featureNormalize(X2(:,2:4))); + elseif partId == 5 + out = sprintf('%0.5f ', computeCostMulti(X2, Y2, [0.1 0.2 0.3 0.4]')); + elseif partId == 6 + out = sprintf('%0.5f ', gradientDescentMulti(X2, Y2, [-0.1 -0.2 -0.3 -0.4]', 0.01, 10)); + elseif partId == 7 + out = sprintf('%0.5f ', normalEqn(X2, Y2)); + end +end + +function url = challenge_url() + url = 'http://www.ml-class.org/course/homework/challenge'; +end + +function url = submit_url() + url = 'http://www.ml-class.org/course/homework/submit'; +end + +% ========================= CHALLENGE HELPERS ========================= + +function src = source(partId) + src = ''; + src_files = sources(); + if partId <= numel(src_files) + flist = src_files{partId}; + for i = 1:numel(flist) + fid = fopen(flist{i}); + while ~feof(fid) + line = fgets(fid); + src = [src line]; + end + src = [src '||||||||']; + end + end +end + +function ret = isValidPartId(partId) + partNames = validParts(); + ret = (~isempty(partId)) && (partId >= 1) && (partId <= numel(partNames) + 1); +end + +function partId = promptPart() + fprintf('== Select which part(s) to submit:\n', ... + homework_id()); + partNames = validParts(); + srcFiles = sources(); + for i = 1:numel(partNames) + fprintf('== %d) %s [', i, partNames{i}); + fprintf(' %s ', srcFiles{i}{:}); + fprintf(']\n'); + end + fprintf('== %d) All of the above \n==\nEnter your choice [1-%d]: ', ... + numel(partNames) + 1, numel(partNames) + 1); + selPart = input('', 's'); + partId = str2num(selPart); + if ~isValidPartId(partId) + partId = -1; + end +end + +function [email,ch,signature] = getChallenge(email) + str = urlread(challenge_url(), 'post', {'email_address', email}); + + str = strtrim(str); + [email, str] = strtok (str, '|'); + [ch, str] = strtok (str, '|'); + [signature, str] = strtok (str, '|'); +end + + +function [result, str] = submitSolution(email, ch_resp, part, output, ... + source, signature) + + params = {'homework', homework_id(), ... + 'part', num2str(part), ... + 'email', email, ... + 'output', output, ... + 'source', source, ... + 'challenge_response', ch_resp, ... 
+ 'signature', signature}; + + str = urlread(submit_url(), 'post', params); + + % Parse str to read for success / failure + result = 0; + +end + +% =========================== LOGIN HELPERS =========================== + +function [login password] = loginPrompt() + % Prompt for password + [login password] = basicPrompt(); + + if isempty(login) || isempty(password) + login = []; password = []; + end +end + + +function [login password] = basicPrompt() + login = input('Login (Email address): ', 's'); + password = input('Password: ', 's'); +end + + +function [str] = challengeResponse(email, passwd, challenge) + salt = ')~/|]QMB3[!W`?OVt7qC"@+}'; + str = sha1([challenge sha1([salt email passwd])]); + sel = randperm(numel(str)); + sel = sort(sel(1:16)); + str = str(sel); +end + + +% =============================== SHA-1 ================================ + +function hash = sha1(str) + + % Initialize variables + h0 = uint32(1732584193); + h1 = uint32(4023233417); + h2 = uint32(2562383102); + h3 = uint32(271733878); + h4 = uint32(3285377520); + + % Convert to word array + strlen = numel(str); + + % Break string into chars and append the bit 1 to the message + mC = [double(str) 128]; + mC = [mC zeros(1, 4-mod(numel(mC), 4), 'uint8')]; + + numB = strlen * 8; + if exist('idivide') + numC = idivide(uint32(numB + 65), 512, 'ceil'); + else + numC = ceil(double(numB + 65)/512); + end + numW = numC * 16; + mW = zeros(numW, 1, 'uint32'); + + idx = 1; + for i = 1:4:strlen + 1 + mW(idx) = bitor(bitor(bitor( ... + bitshift(uint32(mC(i)), 24), ... + bitshift(uint32(mC(i+1)), 16)), ... + bitshift(uint32(mC(i+2)), 8)), ... + uint32(mC(i+3))); + idx = idx + 1; + end + + % Append length of message + mW(numW - 1) = uint32(bitshift(uint64(numB), -32)); + mW(numW) = uint32(bitshift(bitshift(uint64(numB), 32), -32)); + + % Process the message in successive 512-bit chs + for cId = 1 : double(numC) + cSt = (cId - 1) * 16 + 1; + cEnd = cId * 16; + ch = mW(cSt : cEnd); + + % Extend the sixteen 32-bit words into eighty 32-bit words + for j = 17 : 80 + ch(j) = ch(j - 3); + ch(j) = bitxor(ch(j), ch(j - 8)); + ch(j) = bitxor(ch(j), ch(j - 14)); + ch(j) = bitxor(ch(j), ch(j - 16)); + ch(j) = bitrotate(ch(j), 1); + end + + % Initialize hash value for this ch + a = h0; + b = h1; + c = h2; + d = h3; + e = h4; + + % Main loop + for i = 1 : 80 + if(i >= 1 && i <= 20) + f = bitor(bitand(b, c), bitand(bitcmp(b), d)); + k = uint32(1518500249); + elseif(i >= 21 && i <= 40) + f = bitxor(bitxor(b, c), d); + k = uint32(1859775393); + elseif(i >= 41 && i <= 60) + f = bitor(bitor(bitand(b, c), bitand(b, d)), bitand(c, d)); + k = uint32(2400959708); + elseif(i >= 61 && i <= 80) + f = bitxor(bitxor(b, c), d); + k = uint32(3395469782); + end + + t = bitrotate(a, 5); + t = bitadd(t, f); + t = bitadd(t, e); + t = bitadd(t, k); + t = bitadd(t, ch(i)); + e = d; + d = c; + c = bitrotate(b, 30); + b = a; + a = t; + + end + h0 = bitadd(h0, a); + h1 = bitadd(h1, b); + h2 = bitadd(h2, c); + h3 = bitadd(h3, d); + h4 = bitadd(h4, e); + + end + + hash = reshape(dec2hex(double([h0 h1 h2 h3 h4]), 8)', [1 40]); + + hash = lower(hash); + +end + +function ret = bitadd(iA, iB) + ret = double(iA) + double(iB); + ret = bitset(ret, 33, 0); + ret = uint32(ret); +end + +function ret = bitrotate(iA, places) + t = bitshift(iA, places - 32); + ret = bitshift(iA, places); + ret = bitor(ret, t); +end diff --git a/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/warmUpExercise.m 
b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/warmUpExercise.m
new file mode 100644
index 0000000..5133fa5
--- /dev/null
+++ b/R_LinR/asset-v1_MEPhIx+CSA12AI+2019Spring+type@asset+block@mlclass-ex1/mlclass-ex1/warmUpExercise.m
@@ -0,0 +1,21 @@
+function A = warmUpExercise()
+%WARMUPEXERCISE Example function in octave
+%   A = WARMUPEXERCISE() is an example function that returns the 5x5 identity matrix
+
+A = [];
+% ============= YOUR CODE HERE ==============
+% Instructions: Return the 5x5 identity matrix
+%               In octave, we return values by defining which variables
+%               represent the return values (at the top of the file)
+%               and then set them accordingly.
+
+
+% eye(5) returns the 5x5 identity matrix
+A = eye(5);
+
+
+
+% ===========================================
+
+
+end
diff --git a/R_LinR/ftests.pdf b/R_LinR/ftests.pdf
Binary files differ
new file mode 100644
index 0000000..dc634ab
--- /dev/null
+++ b/R_LinR/ftests.pdf
diff --git a/R_LinR/image1.png b/R_LinR/image1.png
Binary files differ
new file mode 100644
index 0000000..ea6e914
--- /dev/null
+++ b/R_LinR/image1.png
diff --git a/R_LinR/image2.png b/R_LinR/image2.png
Binary files differ
new file mode 100644
index 0000000..893d4d7
--- /dev/null
+++ b/R_LinR/image2.png
diff --git a/R_LinR/main.r b/R_LinR/main.r
index d7b5856..3bb98fb 100755
--- a/R_LinR/main.r
+++ b/R_LinR/main.r
@@ -5,7 +5,7 @@ df <- subset(df, sex == "F")
 df <- subset(df, select = -c(zcta, sex))
 df <- subset(df, 8 < meaneducation & meaneducation < 18)
 df <- subset(df, 10000 < meanhouseholdincome & meanhouseholdincome < 200000)
-df <- subset(df, 10000 < meanhouseholdincome & meanhouseholdincome < 200000)
+df <- subset(df, 0 < meanemployment & meanemployment < 3)
 df <- subset(df, 20 < meanage & meanage < 60)
 
 df$log_income <- log10(df$meanhouseholdincome)
@@ -15,12 +15,46 @@ names(df) <- c("X", "age", "education", "employment", "income", "log_income")
 
 library(ggplot2)
 
-ggplot(df, aes(x = age, y = log_income)) +
-    geom_point(alpha = 0.2) +
-    labs(x = "age", y = "income", title = "log_income(age)")
+# b
 model <- lm(log_income ~ age, df)
 
+# a
+ggplot(df, aes(x = age, y = log_income)) +
+    geom_point(alpha = 0.2) +
+    geom_abline(
+        intercept = coef(model)[1],
+        slope = coef(model)[2],
+        color = "red",
+        size = 1
+    ) +
+    labs(x = "age", y = "income", title = "log_income(age)")
+
+# bcde
 print(model)
 summary(model)
+
+model_2 <- lm(log_income ~ education, df)
+print(model_2)
+summary(model_2)
+
+ggplot(df, aes(x = education, y = log_income)) +
+    geom_point(alpha = 0.2) +
+    geom_abline(
+        intercept = coef(model_2)[1],
+        slope = coef(model_2)[2],
+        color = "red",
+        size = 1
+    ) +
+    labs(x = "education", y = "income", title = "log_income(education)")
+
+
+model_3 <- lm(log_income ~ education + age + employment, df)
+print(model_3)
+summary(model_3)
+
+ggplot(df) +
+    geom_point(aes(x = log_income, y = fitted(model_3)), alpha = 0.2) +
+    geom_line(aes(x = log_income, y = log_income), col = "red") +
+    labs(x = "actual", y = "predicted")
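The vectorized update in gradientDescent.m / gradientDescentMulti.m and the closed form in normalEqn.m carry over almost line for line to R. A sketch, assuming X already carries the intercept column; the function and variable names below are invented for illustration, not part of the repo:

gradient_descent <- function(X, y, theta, alpha, num_iters) {
    m <- length(y)
    J_history <- numeric(num_iters)
    for (iter in seq_len(num_iters)) {
        # theta := theta - alpha/m * X'(X*theta - y), as in gradientDescent.m
        theta <- theta - (alpha / m) * (t(X) %*% (X %*% theta - y))
        # cost J = 1/(2m) * sum((X*theta - y)^2), as in computeCost.m
        J_history[iter] <- sum((X %*% theta - y)^2) / (2 * m)
    }
    list(theta = theta, J_history = J_history)
}

# normalEqn.m uses pinv(X'X) * X'y; plain solve() is fine when X'X is invertible
normal_eqn <- function(X, y) solve(t(X) %*% X, t(X) %*% y)

With X <- cbind(1, df$education, df$age, df$employment) and y <- df$log_income, normal_eqn(X, y) should reproduce coef(model_3) up to numerical error; gradient descent additionally needs the features normalized first (what featureNormalize.m does) to converge at a reasonable alpha.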
