diff --git a/.gitignore b/.gitignore
deleted file mode 100644
index b7481a57..00000000
--- a/.gitignore
+++ /dev/null
@@ -1,108 +0,0 @@
-# Created by .ignore support plugin (hsz.mobi)
-### Python template
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-env/
-build/
-develop-eggs/
-dist/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*,cover
-.hypothesis/
-
-# Translations
-*.mo
-*.pot
-
-# Django stuff:
-*.log
-local_settings.py
-
-# Flask stuff:
-instance/
-.webassets-cache
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# Jupyter Notebook
-.ipynb_checkpoints
-
-# pyenv
-.python-version
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# dotenv
-.env
-
-# virtualenv
-.venv
-venv/
-ENV/
-
-# Spyder project settings
-.spyderproject
-
-# Rope project settings
-.ropeproject
-
-/forward_sample_NO_NA.csv
-/key_stats_NO_NA_enhanced.csv
-/YAHOO-INDEX_GSPC.csv
-/stock_prices.csv
-.gitignore
-.idea/
-YAHOO-INDEX_GSPC.csv
-forward/
-forward_sample_NO_NA.csv
-key_stats_NO_NA_enhanced.csv
-stock_prices.csv
diff --git a/README.md b/README.md
index cb18cfdd..d006cdbf 100644
--- a/README.md
+++ b/README.md
@@ -1,97 +1,334 @@
-# MachineLearningStocks
+# MachineLearningStocks in python: a starter project and guide
+
+MachineLearningStocks is designed to be an intuitive and highly extensible template project applying machine learning to making stock predictions. My hope is that this project will help you understand the overall workflow of using machine learning to predict stock movements and also appreciate some of the subtleties. And of course, after following this guide and playing around with the project, you should definitely make your own improvements -- if you're struggling to think of what to do, at the end of this readme I've included a long list of possibilities: take your pick.
+
+Concretely, we will be cleaning and preparing a dataset of historical stock prices and fundamentals using `pandas`, after which we will apply a `scikit-learn` classifier to discover the relationship between stock fundamentals (e.g. PE ratio, debt/equity, float, etc) and the subsequent annual price change (compared with an index). We then conduct a simple backtest, before generating predictions on current data.
+
+While I would not live trade based off of the predictions from this exact code, I do believe that you can use this project as a starting point for a profitable trading system – I have actually used code based on this project to live trade, with pretty decent results (around 20% returns on backtest and 10-15% on live trading).
+
+Though this project was originally based on Sentdex's excellent [machine learning tutorial](https://www.youtube.com/playlist?list=PLQVvvaa0QuDd0flgGphKCej-9jp-QdzZ3), it has quite a lot of personal significance for me. It was my first proper python project, one of my first real encounters with ML, and the first time I used git.
At the start, my code was rife with bad practice and inefficiency: I have since tried to amend most of this, but please be warned that some minor issues may remain (feel free to raise an issue, or fork and submit a PR). Both the project and myself as a programmer have evolved a lot since the first iteration, and despite its origins in a youtube tutorial series I now think of it as 'my own'.
+
+*As a disclaimer, this is a purely educational project. Be aware that backtested performance may often be deceptive – trade at your own risk!*
+
+*At some stage, this guide will likely be cross-posted at my academic blog, [reasonabledeviations.science/](https://reasonabledeviations.science/)*
+
+## Contents
+
+- [Contents](#contents)
+- [Overview](#overview)
+- [Quickstart](#quickstart)
+- [0. Preliminaries](#0-preliminaries)
+- [1. Historical data](#1-historical-data)
+  - [Historical stock fundamentals](#historical-stock-fundamentals)
+  - [Historical price data](#historical-price-data)
+- [2. Creating the training dataset](#2-creating-the-training-dataset)
+  - [Preprocessing historical price data](#preprocessing-historical-price-data)
+  - [Features](#features)
+    - [Valuation measures](#valuation-measures)
+    - [Financials](#financials)
+    - [Trading information](#trading-information)
+  - [Parsing](#parsing)
+- [3. Backtesting](#3-backtesting)
+- [4. Current fundamental data](#4-current-fundamental-data)
+- [5. Stock prediction](#5-stock-prediction)
+- [Unit testing](#unit-testing)
+- [Where to go from here](#where-to-go-from-here)
+  - [Data acquisition](#data-acquisition)
+  - [Data preprocessing](#data-preprocessing)
+  - [Machine learning](#machine-learning)
+- [Contributing](#contributing)

-This project uses python and scikit-learn to make stock predictions. The code is based on Sentdex's excellent
-[machine learning tutorial](https://www.youtube.com/playlist?list=PLQVvvaa0QuDd0flgGphKCej-9jp-QdzZ3).
+## Overview

-This was my first proper python project, as well as the first time I've used GitHub, so apologies for poor documentation and bad coding.
+The overall workflow to use machine learning to make stock predictions is as follows:

-**Update as of February 2017:** Because a fair amount of people have expressed interest in this project, over the
-next few weeks I am going to fix all the oustanding issues and modernise the project.
+1. Acquire historical fundamental data -- these are the *features* or *predictors*
+2. Acquire historical stock price data -- this will make up the dependent variable, or label (what we are trying to predict).
+3. Preprocess data
+4. Use a machine learning model to learn from the data
+5. Backtest the performance of the machine learning model
+6. Acquire current fundamental data
+7. Generate predictions from current fundamental data

-**Update as of October 2017:** Basically, my coding ability has come a long way since I first wrote this project, as has my understanding of machine learning.
-I have continuously been developing a second iteration of this project, which is FAR more robust and sophisticated (though the general idea remains the same).
- I have better data, better machine learning algorithms, and correspondingly better performance.
- At some stage, I may update this project on github, but I hope you understand my reluctance to give away alpha.
- That being said, I suggest you use this project as a starting point: from experience,
- I can tell you that on this backbone you can probably build a profitable trading strategy.
+This is a very generalised overview, but in principle this is all you need to build a fundamentals-based ML stock predictor.

-## Overview
+## Quickstart
+
+If you want to throw away the instruction manual and play immediately, clone this project, then download and unzip the [data file](https://pythonprogramming.net/data-acquisition-machine-learning/) into the same directory.

-The program looks at historical stock fundamentals (e.g PE ratio, Debt/equity etc), and also historical prices. The program then tries to 'learn' if there is any relationship between those fundamentals and the resulting change in price.
+Then, run the following in terminal:
+
+```bash
+pip install -r requirements.txt
+python download_historical_prices.py
+python parsing_keystats.py
+python backtesting.py
+python current_data.py
+pytest -v
+python stock_prediction.py
+```

-Then, we feed in the current stock fundamentals. The program should then output a list of stocks which have 'good fundamentals', which in the past have corresponded to a price increase.
+Otherwise, follow the step-by-step guide below.
+
+## 0. Preliminaries
+
+This project uses python 3, and the common data science libraries `pandas` and `scikit-learn`. A full list of requirements is included in the `requirements.txt` file. To install all of the requirements at once, run the following code in terminal:
+
+```bash
+pip install -r requirements.txt
+```

-Note that this repository *does not include* all the backtesting etc. It is just the final product. During the backtesting, I was getting returns of about 17%, which is quite a decent outperformance of the market.
+To get started, clone this project and unzip it. This will become our working directory.

-All stocks are US based, from the S&P500. This also behaves as our benchmark.
+## 1. Historical data

-## Data sources
+Data acquisition and preprocessing are probably the hardest part of most machine learning projects. But they are a necessary evil, so it's best to not fret and just carry on.

-We need three datasets:
+For this project, we need three datasets:

 1. Historical stock fundamentals
-2. Historical stock price changes (including data for the S&P500).
-3. Current stock prices.
+2. Historical stock prices
+3. Historical S&P500 prices
+
+We need the S&P500 index prices as a benchmark: a 5% stock growth does not mean much if the S&P500 grew 10% in that time period, so all stock returns must be compared to those of the index.

 ### Historical stock fundamentals

-This is actually very difficult to find. However, it turns out that there is a way to parse it from yahoo finance. I will not go into details, because [Sentdex has done it for us](https://pythonprogramming.net/data-acquisition-machine-learning/). On this page you will be able to find a file called `intraQuarter.zip`.
+Historical fundamental data is actually very difficult to find (for free, at least). Although sites like [Quandl](https://www.quandl.com/) do have datasets available, you often have to pay a pretty steep fee.
+
+It turns out that there is a way to parse this data, for free, from [Yahoo Finance](https://finance.yahoo.com/). I will not go into details, because [Sentdex has done it for us](https://pythonprogramming.net/data-acquisition-machine-learning/). On his page you will be able to find a file called `intraQuarter.zip`, which you should download, unzip, and place in your working directory.
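+Once it is unzipped, a quick sanity check of the data layout can be reassuring. As the next paragraph explains, the relevant subfolder is `_KeyStats`: it contains one folder per ticker, and each snapshot is an html file whose name encodes the date it was taken (`%Y%m%d%H%M%S.html`, the format that `parsing_keystats.py` later relies on). A minimal sketch, assuming you unzipped into the working directory:
+
+```python
+import os
+from datetime import datetime
+
+statspath = "intraQuarter/_KeyStats/"
+# Ignore stray hidden files like macOS's .DS_Store, which the project code also removes.
+tickers = [t for t in sorted(os.listdir(statspath)) if not t.startswith('.')]
+print(len(tickers), "tickers, e.g.", tickers[:5])
+
+# Snapshot dates for the first ticker, parsed from the html filenames.
+files = sorted(os.listdir(statspath + tickers[0]))
+print([str(datetime.strptime(f, '%Y%m%d%H%M%S.html').date()) for f in files[:3]])
+```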
Relevant to this project is the subfolder called `_KeyStats`, which contains html files that hold stock fundamentals for all stocks in the S&P500 between 2003 and 2013, sorted by stock. However, at this stage, the data is unusable -- we will have to parse it into a nice csv file before we can do any ML.

-intraQuarter contains a subfolder called KeyStats, which contains fundamentals for all stocks in the S&P500 back to around 2003, sorted by stock.
+### Historical price data

-### Historical stock price changes
+In the first iteration of the project, I used `pandas-datareader`, an extremely convenient library which can load stock data straight into `pandas`. However, after Yahoo Finance changed their UI, `datareader` no longer worked, so I switched to [Quandl](https://www.quandl.com/), which has free stock price data for a few tickers, and a python API. However, as `pandas-datareader` has been [fixed](https://github.com/ranaroussi/fix-yahoo-finance), we will use that instead.

-For the historical stock prices, I used [Quandl](https://www.quandl.com/), which has a free python API.
+Likewise, we can easily use `pandas-datareader` to access data for the SPY ticker. Failing that, one could manually download it from [yahoo finance](https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC), place it into the project directory and rename it `sp500_index.csv`.

-Quandl has nicely cleaned data, but more importantly its stock data has been adjusted to include things like share splits.
+The code for downloading historical price data can be run by entering the following into terminal:
+
+```bash
+python download_historical_prices.py
+```

-The historical S&P500 values can be downloaded from [yahoo finance](https://finance.yahoo.com/quote/%5EGSPC/history?p=%5EGSPC), which we name `YAHOO-INDEX_GSPC.csv`.
+## 2. Creating the training dataset

-### Current data
+Our ultimate goal for the training data is to have a 'snapshot' of a particular stock's fundamentals at a particular time, and the corresponding subsequent annual performance of the stock.

-Current data is parsed from Yahoo finance using regex. I tend to have to fix this whenever yahoo changes their UI :(
+For example, if our 'snapshot' consists of all of the fundamental data for AAPL on the date 28/1/2005, then we also need to know the percentage price change of AAPL between 28/1/05 and 28/1/06. Thus our algorithm can learn how the fundamentals impact the annual change in the stock price.
+
+In fact, this is a slight oversimplification: what the algorithm will eventually learn is how fundamentals impact the *outperformance of a stock relative to the S&P500 index*. This is why we also need index data.

-## What each file does
+### Preprocessing historical price data

-### quandlData.py
+When `pandas-datareader` downloads stock price data, it does not include rows for weekends and public holidays (when the market is closed).

-Uses the quandl API to get historic adjusted stock prices, returning `stock_prices.csv`.
+However, referring to the example of AAPL above, if our snapshot includes fundamental data for 28/1/05 and we want to see the change in price a year later, we will get the nasty surprise that 28/1/2006 is a Saturday. Does this mean that we have to discard this snapshot?

-**update as of september 2017:** I suspect this is broken, because quandl changed their UI.
-However, one can simply use this module -
+By no means -- data is too valuable to callously toss away.
As a workaround, I instead decided to 'fill forward' the missing data, i.e. we will assume that the stock price on Saturday 28/1/2006 is equal to the stock price on Friday 27/1/2006.

-### dataAcquisition.py
+### Features

-This is the bulk of the project. It looks through intraQuarter to parse the historical fundamental data into a pandas dataframe. Then, it adds to this dataframe the stock percentage change in a year. We compare this with the change in the S&P500 in the same year, to determine if the stock underperformed or outperformed.
+Below is a list of some of the interesting variables that are available on Yahoo Finance.

-Requires intraQuarter, `stock_prices.csv`, and `YAHOO-INDEX_GSPC.csv`.
+#### Valuation measures

-Outputs a csv called `key_stats_NO_NA_enhanced.csv`.
+- Market Cap
+- Enterprise Value
+- Trailing P/E
+- Forward P/E
+- PEG Ratio
+- Price/Sales
+- Price/Book
+- Enterprise Value/Revenue
+- Enterprise Value/EBITDA
+
+#### Financials

-### currentData.py
+- Profit Margin
+- Operating Margin
+- Return on Assets
+- Return on Equity
+- Revenue
+- Revenue Per Share
+- Quarterly Revenue Growth
+- Gross Profit
+- EBITDA
+- Net Income Avi to Common
+- Diluted EPS
+- Quarterly Earnings Growth
+- Total Cash
+- Total Cash Per Share
+- Total Debt
+- Total Debt/Equity
+- Current Ratio
+- Book Value Per Share
+- Operating Cash Flow
+- Levered Free Cash Flow

-Parses the current stock fundamentals from sg.finance.yahoo, and puts them into a dataframe, finally returning a csv called `forward_sample_NO_NA.csv`.
+#### Trading information

-### stockPrediction.py
+- Beta
+- 50-Day Moving Average
+- 200-Day Moving Average
+- Avg Vol (3 month)
+- Shares Outstanding
+- Float
+- % Held by Insiders
+- % Held by Institutions
+- Shares Short
+- Short Ratio
+- Short % of Float
+- Shares Short (prior month)

-The machine learning. Uses a linear SVM to fit the data, then predicts the outcome. Returns a list of stocks to invest in.
+### Parsing

-## Dependencies
+However, all of this data is locked up in HTML files. Thus, we need to build a parser. In this project, I did the parsing with regex, but please note that generally it is [really not recommended](https://stackoverflow.com/questions/1732348/regex-match-open-tags-except-xhtml-self-contained-tags) to use regex to parse HTML. However, I think regex probably wins out for ease of understanding (this project being educational in nature), and from experience regex works fine in this case.

-Being lazy, I have copied all the unique import statements.
+This is the exact regex used:

 ```python
-import numpy as np
-from sklearn import svm, preprocessing
-import pandas as pd
-from collections import Counter
-import os
-import re
-import time
-import urllib.request
-from datetime import datetime
-from Quandl import Quandl
+r'>' + re.escape(variable) + r'.*?(\-?\d+\.*\d*K?M?B?|N/A[\\n|\s]*|>0|NaN)%?(</td>|</span>)'
+```
+
+While it looks pretty arcane, all it is doing is searching for the first occurrence of the feature (e.g. "Market Cap"), then it looks forward until it finds a number immediately followed by a `</td>` or `</span>` (signifying the end of a table entry). The complexity of the expression above accounts for some subtleties in the parsing:
+
+- the numbers could be preceded by a minus sign
+- Yahoo Finance sometimes uses K, M, and B as abbreviations for thousand, million and billion respectively.
+- some data are given as percentages
+- some datapoints are missing, so instead of a number we have to look for "N/A" or "NaN".
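+
+To make the above concrete, here is a small self-contained sketch of this parsing step, run on a made-up fragment of Yahoo Finance HTML. The `to_float` helper is only a stand-in for this project's `data_string_to_float` (which lives in `utils.py` and is not reproduced here):
+
+```python
+import re
+
+# A made-up fragment of a Yahoo Finance statistics page
+# (commas already stripped, as the project's parsers do before searching).
+source = '>Market Cap</span><td>752.5B</td>>Total Debt/Equity</span><td>N/A</td>'
+
+def to_float(value):
+    # Minimal stand-in for utils.data_string_to_float: expand K/M/B suffixes.
+    factors = {'K': 1e3, 'M': 1e6, 'B': 1e9}
+    if value.startswith('N/A') or value == 'NaN':
+        return 'N/A'
+    if value[-1] in factors:
+        return float(value[:-1]) * factors[value[-1]]
+    return float(value)
+
+for variable in ['Market Cap', 'Total Debt/Equity']:
+    regex = (r'>' + re.escape(variable)
+             + r'.*?(\-?\d+\.*\d*K?M?B?|N/A[\\n|\s]*|>0|NaN)%?(</td>|</span>)')
+    match = re.search(regex, source, flags=re.DOTALL)
+    print(variable, to_float(match.group(1)) if match else 'no match')
+# Market Cap 752500000000.0
+# Total Debt/Equity N/A
+```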
+
+Both the preprocessing of price data and the parsing of keystats are included in `parsing_keystats.py`. Run the following in your terminal:
+
+```bash
+python parsing_keystats.py
+```
+
+You should see the file `keystats.csv` appear in your working directory. Now that we have the training data ready, we can actually do some machine learning.
+
+## 3. Backtesting
+
+Backtesting is arguably the most important part of any quantitative strategy: you must have some way of testing the performance of your algorithm before you live trade it.
+
+Despite its importance, I originally did not want to include backtesting code in this repository. The reasons were as follows:
+
+- Backtesting is messy and empirical. The code is not very pleasant to use, and in practice requires a lot of manual interaction.
+- Backtesting is very difficult to get right, and if you do it wrong, you will be deceiving yourself with high returns.
+- Developing and working with your backtest is probably the best way to learn about machine learning and stocks -- you'll see what works, what doesn't, and what you don't understand.
+
+Nevertheless, because of the importance of backtesting, I decided that I can't really call this a 'template machine learning stocks project' without backtesting. Thus, I have included a simplistic backtesting script. Please note that there is a fatal flaw with this backtesting implementation that will result in *much* higher backtesting returns. It is quite a subtle point, but I will let you figure that out :)
+
+Run the following in terminal:
+
+```bash
+python backtesting.py
+```
+
+You should get something like this:
+
+```txt
+Classifier performance
+======================
+Accuracy score: 0.81
+Precision score: 0.75
+
+Stock prediction performance report
+===================================
+Total Trades: 177
+Average return for stock predictions: 37.8 %
+Average market return in the same period: 9.2%
+Compared to the index, our strategy earns 28.6 percentage points more
+```
+
+Again, the performance looks too good to be true and almost certainly is.
+
+## 4. Current fundamental data
+
+Now that we have trained and backtested a model on our data, we would like to generate actual predictions on current data.
+
+As always, we can scrape the data from good old Yahoo Finance. My method is to literally just download the statistics page for each stock (here is the [page](https://finance.yahoo.com/quote/AAPL/key-statistics?p=AAPL) for Apple), then to parse it using regex as before.
+
+In fact, the regex should be almost identical, but because Yahoo has changed their UI a couple of times, there are some minor differences. This part of the project has to be fixed whenever yahoo finance changes their UI, and if you can't get the project to work, the problem is most likely here.
+
+Run the following in terminal:
+
+```bash
+python current_data.py
+```
+
+The script will then begin downloading the HTML into the `forward/` folder within your working directory, before parsing this data and outputting the file `forward_sample.csv`.
+
+## 5. Stock prediction
+
+Now that we have the training data and the current data, we can finally generate actual predictions.
This part of the project is very simple, so go ahead and run the script:
+
+```bash
+python stock_prediction.py
+```
+
+You should get something like this:
+
+```txt
+21 stocks predicted to outperform the S&P500 by more than 10%:
+NOC FL SWK NFX LH NSC SCHL KSU DDS GWW AIZ ORLY R SFLY SHW GME DLX DIS AMP BBBY APD
+```
+
+## Unit testing
+
+I have included a number of unit tests (in the `tests/` folder) which serve to check that things are working properly. However, due to the nature of some of this project's functionality (downloading big datasets), you will have to run all the code once before running the tests. Otherwise, the tests themselves would have to download huge datasets (which I don't think is optimal).
+
+I thus recommend that you run the tests after you have run all the other scripts (except, perhaps, `stock_prediction.py`).
+
+To run the tests, simply enter the following into a terminal instance in the project directory:
+
+```bash
+pytest -v
+```
+
+Please note that it is not considered best practice to include an `__init__.py` file in the `tests/` directory (see [here](https://docs.pytest.org/en/latest/goodpractices.html) for more), but I have done it anyway because it is uncomplicated and functional.
+
+## Where to go from here
+
+I have stated that this project is extensible, so here are some ideas to get you started and possibly increase returns (no promises).
+
+### Data acquisition
+
+My personal belief is that better quality data is THE factor that will ultimately determine your performance. Here are some ideas:
+
+- Explore the other subfolders in Sentdex's `intraQuarter.zip`.
+- Parse the annual reports that all companies submit to the SEC (have a look at the [Edgar Database](https://www.sec.gov/edgar/searchedgar/companysearch.html))
+- Try to find websites from which to scrape fundamental data (this has been my solution).
+- Ditch US stocks and go global -- perhaps better results may be found in markets that are less liquid. It'd be interesting to see whether the predictive power of features varies based on geography.
+- Buy Quandl data, or experiment with alternative data.
+
+### Data preprocessing
+
+- Build a more robust parser using BeautifulSoup
+- In this project, I have just ignored any rows with missing data, but this reduces the size of the dataset considerably. Are there any ways you can fill in some of this data?
+  - hint: if the PE ratio is missing but you know the stock price and the earnings/share...
+  - hint 2: how different is Apple's book value in March to its book value in June?
+- Some form of feature engineering
+  - e.g. calculate [Graham's number](https://www.investopedia.com/terms/g/graham-number.asp) and use it as a feature
+  - some of the features are probably redundant. Why not remove them to speed up training?
+- Speed up the construction of `keystats.csv`.
+  - hint: don't keep appending to one growing dataframe! Split it into chunks
+
+### Machine learning
+
+Altering the machine learning stuff is probably the easiest and most fun to do.
+
+- The most important thing if you're serious about results is to find the problem with the current backtesting setup and fix it. This will likely be quite a sobering experience, but if your backtest is done right, it should mean that any observed outperformance on your test set can be traded on (again, do so at your own discretion).
+- Try a different classifier -- there is plenty of research that advocates the use of SVMs, for example.
Don't forget that other classifiers may require feature scaling etc.
+- Hyperparameter tuning: use gridsearch to find the optimal hyperparameters for your classifier. But make sure you don't overfit!
+- Make it *deep* -- experiment with neural networks (an easy way to start is with `sklearn.neural_network`).
+- Change the classification problem into a regression one: will we achieve better results if we try to predict the stock *price* rather than whether it outperformed?
+- Run the prediction multiple times (perhaps using different hyperparameters?) and select the *k* most common stocks to invest in. This is especially important if the algorithm is not deterministic (as is the case for Random Forest).
+- Experiment with different values of the `outperformance` parameter.
+- Try to plot the importance of different features to 'see what the machine sees'.
+
+## Contributing
+
+Feel free to fork, play around, and submit PRs. I would be very grateful for any bug fixes or more unit tests.
+
+---
+
+For more content like this, check out my academic blog at [reasonabledeviations.science/](https://reasonabledeviations.science/). Who knows, you may find some more hints about how to improve returns.
\ No newline at end of file
diff --git a/backtesting.py b/backtesting.py
new file mode 100644
index 00000000..895f5d51
--- /dev/null
+++ b/backtesting.py
@@ -0,0 +1,78 @@
+# Preprocessing
+import numpy as np
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from sklearn.ensemble import RandomForestClassifier
+from sklearn.metrics import precision_score
+from utils import status_calc
+
+
+def backtest():
+    """
+    A simple backtest, which splits the dataset into a train set and test set,
+    then fits a Random Forest classifier to the train set. We print the precision and accuracy
+    of the classifier on the test set, then run a backtest comparing this strategy's performance
+    to passive investment in the S&P500.
+    Please note that there is a methodological flaw in this backtest which will give deceptively
+    good results, so the results here should not encourage you to live trade.
+    """
+    # Build the dataset, and drop any rows with missing values
+    data_df = pd.read_csv("keystats.csv", index_col='Date')
+    data_df.dropna(axis=0, how='any', inplace=True)
+
+    features = data_df.columns[6:]
+    X = data_df[features].values
+    # The labels are generated by applying the status_calc to the dataframe.
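+    # (status_calc is imported from utils.py, which is not part of this diff.
+    #  Judging from how it is called here -- and from the README's mention of an
+    #  `outperformance` parameter -- it is presumably something like:
+    #      def status_calc(stock, sp500, outperformance=10):
+    #          return stock - sp500 > outperformance
+    #  i.e. a boolean label marking whether the stock beat the index by 10+ points.)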
+    # '1' if a stock beats the S&P500 by more than 10%, else '0'
+    y = list(
+        map(status_calc, data_df["stock_p_change"], data_df["SP500_p_change"]))
+
+    # z is required for us to track returns
+    z = np.array(data_df[["stock_p_change", "SP500_p_change"]])
+
+    # Generate the train set and test set by randomly splitting the dataset
+    X_train, X_test, y_train, y_test, z_train, z_test = train_test_split(
+        X, y, z, test_size=0.2)
+
+    # Instantiate a RandomForestClassifier with 100 trees, then fit it to the training data
+    clf = RandomForestClassifier(n_estimators=100, random_state=0)
+    clf.fit(X_train, y_train)
+
+    # Generate the predictions, then print test set accuracy and precision
+    y_pred = clf.predict(X_test)
+    print("Classifier performance\n", "=" * 20)
+    print(f"Accuracy score: {clf.score(X_test, y_test): .2f}")
+    print(f"Precision score: {precision_score(y_test, y_pred): .2f}")
+
+    num_positive_predictions = sum(y_pred)
+    if num_positive_predictions == 0:
+        print("No stocks predicted!")
+        # Bail out early: with no trades there are no returns to compute.
+        return
+
+    # Recall that z_test stores the change in stock price in column 0, and the
+    # change in S&P500 price in column 1.
+    # Whenever a stock is predicted to outperform (y_pred = 1), we 'buy' that stock
+    # and simultaneously `buy` the index for comparison.
+    stock_returns = 1 + z_test[y_pred, 0] / 100
+    market_returns = 1 + z_test[y_pred, 1] / 100
+
+    # Calculate the average growth for each stock we predicted 'buy'
+    # and the corresponding index growth
+    avg_predicted_stock_growth = sum(stock_returns) / num_positive_predictions
+    index_growth = sum(market_returns) / num_positive_predictions
+
+    percentage_stock_returns = 100 * (avg_predicted_stock_growth - 1)
+    percentage_market_returns = 100 * (index_growth - 1)
+    total_outperformance = percentage_stock_returns - percentage_market_returns
+
+    print("\n Stock prediction performance report \n", "=" * 40)
+    print("Total Trades:", num_positive_predictions)
+    print(
+        f"Average return for stock predictions: {percentage_stock_returns: .1f} %")
+    print(
+        f"Average market return in the same period: {percentage_market_returns: .1f}% ")
+    print(
+        f"Compared to the index, our strategy earns {total_outperformance: .1f} percentage points more")
+
+
+if __name__ == "__main__":
+    backtest()
diff --git a/current_data.py b/current_data.py
index e285d7bd..083ff022 100644
--- a/current_data.py
+++ b/current_data.py
@@ -2,223 +2,149 @@
 import os
 import re
 import time
-import urllib.request
-
-# Enter the file path to the intraQuarter directory
-path = "/Users/User/intraQuarter"
+import requests
+import numpy as np
+from utils import data_string_to_float
+
+# The path to your fundamental data
+statspath = "intraQuarter/_KeyStats/"
+
+# These are the features that will be parsed
+features = [  # Valuation measures
+    'Market Cap',
+    'Enterprise Value',
+    'Trailing P/E',
+    'Forward P/E',
+    'PEG Ratio',
+    'Price/Sales',
+    'Price/Book',
+    'Enterprise Value/Revenue',
+    'Enterprise Value/EBITDA',
+    # Financials
+    'Profit Margin',
+    'Operating Margin',
+    'Return on Assets',
+    'Return on Equity',
+    'Revenue',
+    'Revenue Per Share',
+    'Quarterly Revenue Growth',
+    'Gross Profit',
+    'EBITDA',
+    'Net Income Avi to Common',
+    'Diluted EPS',
+    'Quarterly Earnings Growth',
+    'Total Cash',
+    'Total Cash Per Share',
+    'Total Debt',
+    'Total Debt/Equity',
+    'Current Ratio',
+    'Book Value Per Share',
+    'Operating Cash Flow',
+    'Levered Free Cash Flow',
+    # Trading information
+    'Beta',
+    '50-Day Moving Average',
+    '200-Day Moving Average',
+    'Avg Vol (3 month)',
+    'Shares Outstanding',
'Float', + '% Held by Insiders', + '% Held by Institutions', + 'Shares Short', + 'Short Ratio', + 'Short % of Float', + 'Shares Short (prior month)'] def check_yahoo(): """ - Retrieves the stock ticker from intraQuarter, then downloads the html file from yahoo finance. - :return: forward/ filled with the html file for each ticker + Retrieves the stock ticker from the _KeyStats directory, then downloads the html file from yahoo finance. + :return: a directory named `forward/` filled with the html files for each ticker """ - statspath = path + '/_KeyStats/' - stock_list = [x[0] for x in os.walk(statspath)] + # Create the directory where we will store the current data + if not os.path.exists('forward/'): + os.makedirs('forward/') + + # Retrieve a list of tickers from the fundamental data folder + ticker_list = os.listdir(statspath) + + # Required in macOS to remove the hidden index file. + if '.DS_Store' in ticker_list: + ticker_list.remove('.DS_Store') - # Parse yahoo finance based on these tickers - for each_dir in stock_list[1:]: + for ticker in ticker_list: try: - # Get the ticker from intraQuarter - ticker = each_dir.split(statspath)[1] - link = "http://finance.yahoo.com/quote/" + ticker.upper() + "/key-statistics" - resp = urllib.request.urlopen(link).read() + link = f"http://finance.yahoo.com/quote/{ticker.upper()}/key-statistics" + resp = requests.get(link) # Write results to forward/ - save = "forward/" + str(ticker) + ".html" - file = open(save, "w") - file.write(str(resp)) - file.close() - + save = f"forward/{ticker}.html" + with open(save, 'w') as file: + file.write(resp.text) print(save) except Exception as e: - print(str(e)) + print(f"{ticker}: {str(e)}\n") time.sleep(2) def forward(): """ - Creates the forward sample, by parsing the html that we downloaded in check_yahoo(). - Reads this data into a dataframe, then converts to a csv. - :return: the forward sample as a csv. + Creates the forward sample by parsing the current data html files that we downloaded in check_yahoo(). + :return: a pandas dataframe containing all of the current data for each ticker. 
""" - # The parameters which we will search for - gather = ["Total Debt/Equity", - 'Trailing P/E', - 'Price/Sales', - 'Price/Book', - 'Profit Margin', - 'Operating Margin', - 'Return on Assets', - 'Return on Equity', - 'Revenue Per Share', - 'Market Cap', - 'Enterprise Value', - 'Forward P/E', - 'PEG Ratio', - 'Enterprise Value/Revenue', - 'Enterprise Value/EBITDA', - 'Revenue', - 'Gross Profit', - 'EBITDA', - 'Net Income Avi to Common', - 'Diluted EPS', - 'Earnings Growth', - 'Revenue Growth', - 'Total Cash', - 'Total Cash Per Share', - 'Total Debt', - 'Current Ratio', - 'Book Value Per Share', - 'Cash Flow', - 'Beta', - 'Held by Insiders', - 'Held by Institutions', - 'Shares Short', - 'Short Ratio', - 'Short % of Float', - 'Shares Short (prior '] - - # The empty dataframe which we will fill - df = pd.DataFrame(columns=['Date', - 'Unix', - 'Ticker', - 'Price', - 'stock_p_change', - 'SP500', - 'sp500_p_change', - 'Difference', - 'DE Ratio', - 'Trailing P/E', - 'Price/Sales', - 'Price/Book', - 'Profit Margin', - 'Operating Margin', - 'Return on Assets', - 'Return on Equity', - 'Revenue Per Share', - 'Market Cap', - 'Enterprise Value', - 'Forward P/E', - 'PEG Ratio', - 'Enterprise Value/Revenue', - 'Enterprise Value/EBITDA', - 'Revenue', - 'Gross Profit', - 'EBITDA', - 'Net Income Avl to Common ', - 'Diluted EPS', - 'Earnings Growth', - 'Revenue Growth', - 'Total Cash', - 'Total Cash Per Share', - 'Total Debt', - 'Current Ratio', - 'Book Value Per Share', - 'Cash Flow', - 'Beta', - 'Held by Insiders', - 'Held by Institutions', - 'Shares Short (as of', - 'Short Ratio', - 'Short % of Float', - 'Shares Short (prior ', - 'Status']) - - file_list = os.listdir("forward") - - # This is a requirement if you are on a mac. Inelegant code just to remove the DS_store. - # Thanks Apple! - if '.DS_Store' in file_list: - del file_list[file_list.index('.DS_Store')] + # Creating an empty dataframe which we will later fill. In addition to the features, we need some index variables + # (date, unix timestamp, ticker), and of course the dependent variables (prices). + df_columns = ['Date', + 'Unix', + 'Ticker', + 'Price', + 'stock_p_change', + 'SP500', + 'SP500_p_change'] + features - # This is the actual parsing. This needs to be fixed every time yahoo changes their UI. - for each_file in file_list: - ticker = each_file.split(".html")[0] - full_file_path = "forward/" + each_file - source = open(full_file_path, "r").read() + df = pd.DataFrame(columns=df_columns) - try: - value_list = [] - - for each_data in gather: - try: - regex = re.escape(each_data) + r'.*?(\d{1,8}\.\d{1,8}M?B?|N/A)%?' - value = re.search(regex, source) - value = (value.group(1)) - - if "B" in value: - value = float(value.replace("B", '')) * 1000000000 - elif "M" in value: - value = float(value.replace("M", '')) * 1000000 - - value_list.append(value) - - except Exception: - value = 'N/A' - value_list.append(value) - - if value_list.count("N/A") > 0: - # This is why our result is 'forward_sample_NO_NA'. Change this if you want NA. - # But of course, in that case you need to deal with the NA later on. - pass - - else: - print(each_file) - - # I know this is ugly, but it's practical. 
-            df = df.append({'Date': "N/A",
-                            'Unix': "N/A",
-                            'Ticker': ticker,
-                            'Price': "N/A",
-                            'stock_p_change': "N/A",
-                            'SP500': "N/A",
-                            'sp500_p_change': "N/A",
-                            'Difference': "N/A",
-                            'DE Ratio': value_list[0],
-                            'Trailing P/E': value_list[1],
-                            'Price/Sales': value_list[2],
-                            'Price/Book': value_list[3],
-                            'Profit Margin': value_list[4],
-                            'Operating Margin': value_list[5],
-                            'Return on Assets': value_list[6],
-                            'Return on Equity': value_list[7],
-                            'Revenue Per Share': value_list[8],
-                            'Market Cap': value_list[9],
-                            'Enterprise Value': value_list[10],
-                            'Forward P/E': value_list[11],
-                            'PEG Ratio': value_list[12],
-                            'Enterprise Value/Revenue': value_list[13],
-                            'Enterprise Value/EBITDA': value_list[14],
-                            'Revenue': value_list[15],
-                            'Gross Profit': value_list[16],
-                            'EBITDA': value_list[17],
-                            'Net Income Avl to Common ': value_list[18],
-                            'Diluted EPS': value_list[19],
-                            'Earnings Growth': value_list[20],
-                            'Revenue Growth': value_list[21],
-                            'Total Cash': value_list[22],
-                            'Total Cash Per Share': value_list[23],
-                            'Total Debt': value_list[24],
-                            'Current Ratio': value_list[25],
-                            'Book Value Per Share': value_list[26],
-                            'Cash Flow': value_list[27],
-                            'Beta': value_list[28],
-                            'Held by Insiders': value_list[29],
-                            'Held by Institutions': value_list[30],
-                            'Shares Short (as of': value_list[31],
-                            'Short Ratio': value_list[32],
-                            'Short % of Float': value_list[33],
-                            'Shares Short (prior ': value_list[34],
-                            'Status': "N/A"}, ignore_index=True)
-
-        except Exception:
-            pass
-
-    df.to_csv("forward_sample_NO_NA.csv")
-
-
-# Call the functions to produce the csv.
-check_yahoo()
-forward()
+    tickerfile_list = os.listdir('forward/')
+
+    # Required in macOS to remove the hidden index file.
+    if '.DS_Store' in tickerfile_list:
+        tickerfile_list.remove('.DS_Store')
+
+    # This is the actual parsing. This needs to be fixed every time yahoo changes their UI.
+    for tickerfile in tickerfile_list:
+        ticker = tickerfile.split('.html')[0].upper()
+        source = open(f"forward/{tickerfile}").read()
+        # Remove commas from the html to make parsing easier.
+        source = source.replace(',', '')
+
+        # Regex search for the different variables in the html file, then append to value_list
+        value_list = []
+        for variable in features:
+            try:
+                # Basically, look for the first number present after an occurrence of the variable
+                regex = r'>' + re.escape(variable) + r'.*?(\-?\d+\.*\d*K?M?B?|N/A[\\n|\s]*|>0|NaN)%?' \
+                        r'(</td>|</span>)'
+                value = re.search(regex, source, flags=re.DOTALL).group(1)
+
+                # Dealing with number formatting
+                value_list.append(data_string_to_float(value))
+
+            # The data may not be present. Process accordingly.
+            except AttributeError:
+                value_list.append('N/A')
+                print(ticker, variable)
+
+        # Append the ticker and the features to the dataframe
+        new_df_row = [0, 0, ticker,
+                      0, 0, 0, 0] + value_list
+
+        df = df.append(dict(zip(df_columns, new_df_row)), ignore_index=True)
+
+    return df.replace('N/A', np.nan)
+
+
+if __name__ == '__main__':
+    check_yahoo()
+    current_df = forward()
+    current_df.to_csv('forward_sample.csv', index=False)
diff --git a/dataAcquisition.py b/dataAcquisition.py
deleted file mode 100644
index 87f410d4..00000000
--- a/dataAcquisition.py
+++ /dev/null
@@ -1,293 +0,0 @@
-import pandas as pd
-import os
-import time
-import re
-from datetime import datetime
-
-# How much a stock has to outperform the S&P500 to be considered a success. Subjective.
-how_much_better = 5
-
-# Enter the path to intraQuarter
-path = "/Users/User/intraQuarter"
-
-
-# The list of features we will be looking at.
No choice but to hard code it. -def key_stats(gather=["Total Debt/Equity", - 'Trailing P/E', - 'Price/Sales', - 'Price/Book', - 'Profit Margin', - 'Operating Margin', - 'Return on Assets', - 'Return on Equity', - 'Revenue Per Share', - 'Market Cap', - 'Enterprise Value', - 'Forward P/E', - 'PEG Ratio', - 'Enterprise Value/Revenue', - 'Enterprise Value/EBITDA', - 'Revenue', - 'Gross Profit', - 'EBITDA', - 'Net Income Avl to Common ', - 'Diluted EPS', - 'Earnings Growth', - 'Revenue Growth', - 'Total Cash', - 'Total Cash Per Share', - 'Total Debt', - 'Current Ratio', - 'Book Value Per Share', - 'Cash Flow', - 'Beta', - 'Held by Insiders', - 'Held by Institutions', - 'Shares Short (as of', - 'Short Ratio', - 'Short % of Float', - 'Shares Short (prior ']): - """ - Looks at the data parsed for us by Sentdex (in intraQuarter) to make a csv of the historical - key statistics. - :param gather: The list of fundamentals which we need to gather. - :return: csv of historical key statistics, on which we will train our SVM. - """ - - # List of stocks - statspath = path + '/_KeyStats' - stock_list = [x[0] for x in os.walk(statspath)] - - df = pd.DataFrame(columns=['Date', - 'Unix', - 'Ticker', - 'Price', - 'stock_p_change', - 'SP500', - 'sp500_p_change', - 'Difference', - 'DE Ratio', - 'Trailing P/E', - 'Price/Sales', - 'Price/Book', - 'Profit Margin', - 'Operating Margin', - 'Return on Assets', - 'Return on Equity', - 'Revenue Per Share', - 'Market Cap', - 'Enterprise Value', - 'Forward P/E', - 'PEG Ratio', - 'Enterprise Value/Revenue', - 'Enterprise Value/EBITDA', - 'Revenue', - 'Gross Profit', - 'EBITDA', - 'Net Income Avl to Common ', - 'Diluted EPS', - 'Earnings Growth', - 'Revenue Growth', - 'Total Cash', - 'Total Cash Per Share', - 'Total Debt', - 'Current Ratio', - 'Book Value Per Share', - 'Cash Flow', - 'Beta', - 'Held by Insiders', - 'Held by Institutions', - 'Shares Short (as of', - 'Short Ratio', - 'Short % of Float', - 'Shares Short (prior ', - 'Status']) - - sp500_df = pd.DataFrame.from_csv("YAHOO-INDEX_GSPC.csv") - stock_df = pd.DataFrame.from_csv("stock_prices.csv") - - ticker_list = [] - - # For each stock, we create a list containing all of the different data files - # (corresponding to different dates.) - for each_dir in stock_list[1:]: - each_file = os.listdir(each_dir) - - # snippet to get rid of the stupid .DS_Store file that osx makes. - if '.DS_Store' in each_file: - del each_file[each_file.index('.DS_Store')] - - ticker = each_dir.split(statspath)[1] # retrieves the stock symbol - ticker_list.append(ticker) - - if len(each_file) > 0: # checking that the data exists - for file in each_file: - - # Retrieve the datestamp, and convert to unix format. - date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html') - unix_time = time.mktime(date_stamp.timetuple()) - - full_file_path = each_dir + '/' + file - source = open(full_file_path, 'r').read() - - try: - value_list = [] - - for each_data in gather: - try: - # Use a regex to find the value associated with the variable of interest. - regex = re.escape( - each_data) + r'.*?(\d{1,8}\.\d{1,8}M?B?|N/A)%?' 
- value = re.search(regex, source) - value = (value.group(1)) - - if "B" in value: - value = float(value.replace( - "B", '')) * 1000000000 - elif "M" in value: - value = float(value.replace("M", '')) * 1000000 - - value_list.append(value) - - except Exception: - value = 'N/A' - value_list.append(value) - - try: - sp500_date = datetime.fromtimestamp( - unix_time).strftime('%Y-%m-%d') - row = sp500_df[(sp500_df.index == sp500_date)] - sp500_value = float(row["Adj Close"]) - except: - # this except statement is in case we end up looking for data on a weekend, - # if this is the case, we subtract three days - try: - sp500_date = datetime.fromtimestamp( - unix_time - 259200).strftime('%Y-%m-%d') - row = sp500_df[(sp500_df.index == sp500_date)] - sp500_value = float(row["Adj Close"]) - except Exception as e: - print("sp500", str(e)) - - # We want to see the stock's performance one year later. - one_year_later = int(unix_time + 31536000) - - try: - sp500_1y = datetime.fromtimestamp( - one_year_later).strftime('%Y-%m-%d') - row = sp500_df[(sp500_df.index == sp500_1y)] - sp500_1y_value = float(row["Adj Close"]) - except: - try: - # Again, for the case that we query data on a weekend - sp500_1y = datetime.fromtimestamp( - one_year_later - 259200).strftime('%Y-%m-%d') - row = sp500_df[(sp500_df.index == sp500_1y)] - sp500_1y_value = float(row["Adj Close"]) - except Exception as e: - print("sp500 1 year later issue", str(e)) - - try: - stock_price_1y = datetime.fromtimestamp( - one_year_later).strftime('%Y-%m-%d') - row = stock_df[(stock_df.index == - stock_price_1y)][ticker.upper()] - - stock_1y_value = round(float(row), 2) - - except Exception as e: - try: - stock_price_1y = datetime.fromtimestamp( - one_year_later - 259200).strftime('%Y-%m-%d') - row = stock_df[(stock_df.index == - stock_price_1y)][ticker.upper()] - stock_1y_value = round(float(row), 2) - except Exception as e: - print("stock price:", str(e)) - - try: - stock_price = datetime.fromtimestamp( - unix_time).strftime('%Y-%m-%d') - row = stock_df[(stock_df.index == - stock_price)][ticker.upper()] - stock_price = round(float(row), 2) - - except Exception as e: - try: - stock_price = datetime.fromtimestamp( - unix_time - 259200).strftime('%Y-%m-%d') - row = stock_df[(stock_df.index == - stock_price)][ticker.upper()] - stock_price = round(float(row), 2) - except Exception as e: - print("stock price:", str(e)) - - stock_p_change = round( - ((stock_1y_value - stock_price) / stock_price * 100), 2) - sp500_p_change = round( - ((sp500_1y_value - sp500_value) / sp500_value * 100), 2) - - # If the stock outperformed the SP500 by 5%, label it 'outperform'. 
- if stock_p_change - sp500_p_change > how_much_better: - status = "outperform" - else: - status = "underperform" - - # This determines if we have NA or no NA - if value_list.count("N/A") > 0: - pass - else: - df = df.append({'Date': date_stamp, - 'Unix': unix_time, - 'Ticker': ticker, - 'Price': stock_price, - 'stock_p_change': stock_p_change, - 'SP500': sp500_value, - 'sp500_p_change': sp500_p_change, - 'Difference': stock_p_change - sp500_p_change, - 'DE Ratio': value_list[0], - # 'Market Cap': value_list[1], - 'Trailing P/E': value_list[1], - 'Price/Sales': value_list[2], - 'Price/Book': value_list[3], - 'Profit Margin': value_list[4], - 'Operating Margin': value_list[5], - 'Return on Assets': value_list[6], - 'Return on Equity': value_list[7], - 'Revenue Per Share': value_list[8], - 'Market Cap': value_list[9], - 'Enterprise Value': value_list[10], - 'Forward P/E': value_list[11], - 'PEG Ratio': value_list[12], - 'Enterprise Value/Revenue': value_list[13], - 'Enterprise Value/EBITDA': value_list[14], - 'Revenue': value_list[15], - 'Gross Profit': value_list[16], - 'EBITDA': value_list[17], - 'Net Income Avl to Common ': value_list[18], - 'Diluted EPS': value_list[19], - 'Earnings Growth': value_list[20], - 'Revenue Growth': value_list[21], - 'Total Cash': value_list[22], - 'Total Cash Per Share': value_list[23], - 'Total Debt': value_list[24], - 'Current Ratio': value_list[25], - 'Book Value Per Share': value_list[26], - 'Cash Flow': value_list[27], - 'Beta': value_list[28], - 'Held by Insiders': value_list[29], - 'Held by Institutions': value_list[30], - 'Shares Short (as of': value_list[31], - 'Short Ratio': value_list[32], - 'Short % of Float': value_list[33], - 'Shares Short (prior ': value_list[34], - 'Status': status}, ignore_index=True) - except Exception as e: - # This is awful practice from an awful coder. - pass - - df.to_csv("key_stats_NO_NA_enhanced.csv") - - -# Call the function to produce the csv -key_stats() diff --git a/download_historical_prices.py b/download_historical_prices.py new file mode 100644 index 00000000..2b10a782 --- /dev/null +++ b/download_historical_prices.py @@ -0,0 +1,82 @@ +import os +from pandas_datareader import data as pdr +import pandas as pd +import fix_yahoo_finance as yf +yf.pdr_override() + + +START_DATE = "2003-08-01" +END_DATE = "2015-01-01" + + +def build_stock_dataset(start=START_DATE, end=END_DATE): + """ + Creates the dataset containing all stock prices + :returns: stock_prices.csv + """ + + statspath = "intraQuarter/_KeyStats/" + ticker_list = os.listdir(statspath) + + # Required on macOS + if '.DS_Store' in ticker_list: + os.remove(f"{statspath}/.DS_Store") + ticker_list.remove('.DS_Store') + + # Get all Adjusted Close prices for all the tickers in our list, + # between START_DATE and END_DATE + all_data = pdr.get_data_yahoo(ticker_list, start, end) + stock_data = all_data['Adj Close'] + + # Remove any columns that hold no data, and print their tickers. + stock_data.dropna(how='all', axis=1, inplace=True) + missing_tickers = [ + ticker for ticker in ticker_list if ticker.upper() not in stock_data.columns] + print(f"{len(missing_tickers)} tickers are missing: \n {missing_tickers} ") + # If there are only some missing datapoints, forward fill. 
+    stock_data.ffill(inplace=True)
+    stock_data.to_csv('stock_prices.csv')
+
+
+def build_sp500_dataset(start=START_DATE, end=END_DATE):
+    """
+    Creates the dataset containing S&P500 prices
+    :returns: sp500_index.csv
+    """
+    index_data = pdr.get_data_yahoo('SPY', start=start, end=end)
+    index_data.to_csv("sp500_index.csv")
+
+
+def build_dataset_iteratively(idx_start, idx_end, date_start=START_DATE, date_end=END_DATE):
+    """
+    This is an alternative iterative solution to building the stock dataset, which may be necessary if the
+    tickerlist is too big.
+    Instead of downloading all at once, we download ticker by ticker and append to a dataframe.
+    This will download data for tickerlist[idx_start:idx_end], which makes this method suitable
+    for chunking data.
+
+    :param idx_start: (int) the starting index of the tickerlist
+    :param idx_end: (int) the end index of the tickerlist
+    """
+
+    statspath = "intraQuarter/_KeyStats/"
+    ticker_list = os.listdir(statspath)
+
+    df = pd.DataFrame()
+    # Download only the requested chunk, ticker by ticker. This also works better for batches.
+    for ticker in ticker_list[idx_start:idx_end]:
+        ticker = ticker.upper()
+
+        stock_ohlc = pdr.get_data_yahoo(
+            ticker, start=date_start, end=date_end)
+        if stock_ohlc.empty:
+            print(f"No data for {ticker}")
+            continue
+        adj_close = stock_ohlc['Adj Close'].rename(ticker)
+        df = pd.concat([df, adj_close], axis=1)
+        df.to_csv('stock_prices.csv')
+
+
+if __name__ == "__main__":
+    build_stock_dataset()
+    build_sp500_dataset()
diff --git a/parsing_keystats.py b/parsing_keystats.py
new file mode 100644
index 00000000..c1e1b98d
--- /dev/null
+++ b/parsing_keystats.py
@@ -0,0 +1,216 @@
+import pandas as pd
+import os
+import time
+import re
+from datetime import datetime
+from utils import data_string_to_float
+
+
+# The directory where individual html files are stored
+statspath = "intraQuarter/_KeyStats/"
+
+# The list of features to parse from the html files
+features = [  # Valuation measures
+    'Market Cap',
+    'Enterprise Value',
+    'Trailing P/E',
+    'Forward P/E',
+    'PEG Ratio',
+    'Price/Sales',
+    'Price/Book',
+    'Enterprise Value/Revenue',
+    'Enterprise Value/EBITDA',
+    # Financial highlights
+    'Profit Margin',
+    'Operating Margin',
+    'Return on Assets',
+    'Return on Equity',
+    'Revenue',
+    'Revenue Per Share',
+    'Qtrly Revenue Growth',
+    'Gross Profit',
+    'EBITDA',
+    'Net Income Avl to Common',
+    'Diluted EPS',
+    'Qtrly Earnings Growth',
+    'Total Cash',
+    'Total Cash Per Share',
+    'Total Debt',
+    'Total Debt/Equity',
+    'Current Ratio',
+    'Book Value Per Share',
+    'Operating Cash Flow',
+    'Levered Free Cash Flow',
+    # Trading information
+    'Beta',
+    '50-Day Moving Average',
+    '200-Day Moving Average',
+    'Avg Vol (3 month)',
+    'Shares Outstanding',
+    'Float',
+    '% Held by Insiders',
+    '% Held by Institutions',
+    'Shares Short (as of',
+    'Short Ratio',
+    'Short % of Float',
+    'Shares Short (prior month)']
+
+
+def preprocess_price_data():
+    """
+    Currently, the sp500 and stock price datasets we downloaded do not have any data for
+    days when the market was closed (weekends and public holidays). We need to amend this so that
+    all rows are included. Doing this now saves a lot of effort when we actually create the
+    keystats dataset, which requires that we have stock data every day.
+    :return: SP500 and stock dataframes, with no missing rows.
+    """
+    # Read in SP500 data and stock data, parsing the dates.
+    sp500_raw_data = pd.read_csv(
+        "sp500_index.csv", index_col='Date', parse_dates=True)
+    stock_raw_data = pd.read_csv(
+        "stock_prices.csv", index_col='Date', parse_dates=True)
+
+    # We will reindex to include the weekends.
+    start_date = str(stock_raw_data.index[0])
+    end_date = str(stock_raw_data.index[-1])
+    idx = pd.date_range(start_date, end_date)
+    sp500_raw_data = sp500_raw_data.reindex(idx)
+    stock_raw_data = stock_raw_data.reindex(idx)
+
+    # Now the weekends are NaN, so we fill forward these NaNs
+    # (i.e. weekends take the value of Friday's adjusted close).
+    sp500_raw_data.ffill(inplace=True)
+    stock_raw_data.ffill(inplace=True)
+
+    return sp500_raw_data, stock_raw_data
+
+
+def parse_keystats(sp500_df, stock_df):
+    """
+    We have downloaded a large number of html files, which are snapshots of a ticker at different times,
+    containing the fundamental data (our features). To extract the key statistics, we use regex.
+    For supervised machine learning, we also need the data that will form our dependent variable,
+    the performance of the stock compared to the SP500.
+    :sp500_df: dataframe containing SP500 prices
+    :stock_df: dataframe containing stock prices
+    :return: a dataframe of training data (i.e. features and the components of our dependent variable)
+    """
+    # The tickers whose data is to be parsed.
+    stock_list = [x[0] for x in os.walk(statspath)]
+    stock_list = stock_list[1:]
+
+    # Creating a new dataframe which we will later fill.
+    df_columns = ['Date',
+                  'Unix',
+                  'Ticker',
+                  'Price',
+                  'stock_p_change',
+                  'SP500',
+                  'SP500_p_change'] + features
+
+    df = pd.DataFrame(columns=df_columns)
+
+    for stock_directory in stock_list:
+        keystats_html_files = os.listdir(stock_directory)
+
+        # Snippet to get rid of the .DS_Store file in macOS
+        if '.DS_Store' in keystats_html_files:
+            keystats_html_files.remove('.DS_Store')
+
+        ticker = stock_directory.split(statspath)[1]
+
+        for file in keystats_html_files:
+            # Convert the datetime format of our file to unix time
+            date_stamp = datetime.strptime(file, '%Y%m%d%H%M%S.html')
+            unix_time = time.mktime(date_stamp.timetuple())
+
+            # Read in the html file as a string.
+            full_file_path = stock_directory + '/' + file
+
+            # This will store the parsed values
+            value_list = []
+
+            with open(full_file_path, 'r') as source:
+                source = source.read()
+                # Remove commas from the html to make parsing easier.
+                source = source.replace(',', '')
+
+                # Regex search for the different variables in the html file, then append to value_list
+                for variable in features:
+                    # Search for the table entry adjacent to the variable name.
+                    try:
+                        regex = r'>' + re.escape(variable) + r'.*?(\-?\d+\.*\d*K?M?B?|N/A[\\n|\s]*|>0|NaN)%?' \
+                                r'(</td>|</span>)'
+                        value = re.search(
+                            regex, source, flags=re.DOTALL).group(1)
+
+                        # Dealing with number formatting
+                        value_list.append(data_string_to_float(value))
+
+                    # The data may not be present. Process accordingly
+                    except AttributeError:
+                        # In the past, 'Avg Vol' was instead named 'Average Volume'
+                        # If 'Avg Vol' fails, search for 'Average Volume'.
+                        if variable == 'Avg Vol (3 month)':
+                            try:
+                                new_variable = '>Average Volume (3 month)'
+                                regex = re.escape(new_variable) + r'.*?(\-?\d+\.*\d*K?M?B?|N/A[\\n|\s]*|>0)%?' \
+                                        r'(</td>|</span>)'
+                                value = re.search(
+                                    regex, source, flags=re.DOTALL).group(1)
+                                value_list.append(data_string_to_float(value))
+                            except AttributeError:
+                                value_list.append('N/A')
+                        else:
+                            value_list.append('N/A')
+
+                            # Print any failures that aren't the common ones.
+                            common_fails = ['Qtrly Revenue Growth', 'Operating Cash Flow', 'Levered Free Cash Flow',
+                                            'Payout Ratio', 'Qtrly Earnings Growth']
+                            if variable not in common_fails:
+                                print(ticker, file, variable)
+
+            # We need the stock price and SP500 price now and one year from now.
+            # Convert from unix time to YYYY-MM-DD, so we can look for the price in the dataframe
+            # then calculate the percentage change.
+            current_date = datetime.fromtimestamp(
+                unix_time).strftime('%Y-%m-%d')
+            one_year_later = datetime.fromtimestamp(
+                unix_time + 31536000).strftime('%Y-%m-%d')
+
+            # SP500 prices now and one year later, and the percentage change.
+            sp500_price = float(sp500_df.loc[current_date, 'Adj Close'])
+            sp500_1y_price = float(sp500_df.loc[one_year_later, 'Adj Close'])
+            sp500_p_change = round(
+                ((sp500_1y_price - sp500_price) / sp500_price * 100), 2)
+
+            # Stock prices now and one year later. We need a try/except because some data is missing
+            stock_price, stock_1y_price = 'N/A', 'N/A'
+            try:
+                stock_price = float(stock_df.loc[current_date, ticker.upper()])
+                stock_1y_price = float(
+                    stock_df.loc[one_year_later, ticker.upper()])
+            except KeyError:
+                # If stock data is missing, we must skip this datapoint
+                print(f"PRICE RETRIEVAL ERROR for {ticker}")
+                continue
+
+            stock_p_change = round(
+                ((stock_1y_price - stock_price) / stock_price * 100), 2)
+
+            # Append all our data to the dataframe.
+            new_df_row = [date_stamp, unix_time, ticker,
+                          stock_price, stock_p_change, sp500_price, sp500_p_change] + value_list
+
+            df = df.append(dict(zip(df_columns, new_df_row)),
+                           ignore_index=True)
+
+    # Remove rows with missing stock price data
+    df.dropna(axis=0, subset=['Price', 'stock_p_change'], inplace=True)
+    # Output the CSV
+    df.to_csv('keystats.csv', index=False)
+
+
+if __name__ == '__main__':
+    sp500_df, stock_df = preprocess_price_data()
+    parse_keystats(sp500_df, stock_df)
diff --git a/quandlData.py b/quandlData.py
deleted file mode 100644
index 60c3b1fc..00000000
--- a/quandlData.py
+++ /dev/null
@@ -1,35 +0,0 @@
-import pandas as pd
-import os
-from Quandl import Quandl
-
-# Obviously replace with your own path and API key.
-path = "/Users/User/intraQuarter"
-auth_tok = "enteryourkey"
-
-
-def stock_prices():
-    df = pd.DataFrame()
-    statspath = path + '/_KeyStats'
-    stock_list = [x[0] for x in os.walk(statspath)]
-
-    for each_dir in stock_list[1:]:
-        try:
-            ticker = each_dir.split("/Users/User/intraQuarter/_KeyStats/")[1]
-            print(ticker)
-            name = "WIKI/" + ticker.upper()
-
-            # Query Quandl, using the standard format, e.g WIKI/AAPL.
-            data = Quandl.get(name,
-                              trim_start="2000-12-12",
-                              trim_end="2014-12-30",
-                              authtoken=auth_tok)
-            data[ticker.upper()] = data["Adj. Close"]
-            df = pd.concat([df, data[ticker.upper()]], axis=1)
-
-        except Exception as e:
-            print(str(e))
-
-    df.to_csv("stock_prices.csv")
-
-
-stock_prices()
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 00000000..861e07b9
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+pandas_datareader==0.5.0
+requests==2.18.4
+pytest==3.4.1
+pandas==0.22.0
+numpy==1.12.1
+fix_yahoo_finance==0.0.21
+scikit_learn==0.19.1
diff --git a/stockPrediction.py b/stockPrediction.py
deleted file mode 100644
index 58e4fd36..00000000
--- a/stockPrediction.py
+++ /dev/null
@@ -1,112 +0,0 @@
-import numpy as np
-from sklearn import svm, preprocessing
-import pandas as pd
-from collections import Counter
-
-# How much a stock has to outperform the S&P500 to be considered a success.
-# Increase this value if you want fewer, but supposedly higher quality, predictions.
-how_much_better = 15
-
-FEATURES = ['DE Ratio',
-            'Trailing P/E',
-            'Price/Sales',
-            'Price/Book',
-            'Profit Margin',
-            'Operating Margin',
-            'Return on Assets',
-            'Return on Equity',
-            'Revenue Per Share',
-            'Market Cap',
-            'Enterprise Value',
-            'Forward P/E',
-            'PEG Ratio',
-            'Enterprise Value/Revenue',
-            'Enterprise Value/EBITDA',
-            'Revenue',
-            'Gross Profit',
-            'EBITDA',
-            'Net Income Avl to Common ',
-            'Diluted EPS',
-            'Earnings Growth',
-            'Revenue Growth',
-            'Total Cash',
-            'Total Cash Per Share',
-            'Total Debt',
-            'Current Ratio',
-            'Book Value Per Share',
-            'Cash Flow',
-            'Beta',
-            'Held by Insiders',
-            'Held by Institutions',
-            'Shares Short (as of',
-            'Short Ratio',
-            'Short % of Float',
-            'Shares Short (prior ']
-
-
-def status_calc(stock, sp500):
-    return stock - sp500 > how_much_better
-
-
-def build_data_set():
-    training_data = pd.DataFrame.from_csv("key_stats_NO_NA_enhanced.csv")
-
-    # Randomly reorder the data, and replace NA.
-    training_data = training_data.reindex(np.random.permutation(training_data.index))
-    training_data = training_data.replace("NaN", 0).replace("N/A", 0)
-
-    # Write out whether a stock has outperformed or not
-    training_data["Status2"] = list(map(status_calc, training_data["stock_p_change"], training_data["sp500_p_change"]))
-
-    # Feature scaling
-    X_train = preprocessing.scale(np.array(training_data[FEATURES].values))
-
-    y_train = training_data["Status2"] \
-        .replace("underperform", 0) \
-        .replace("outperform", 1) \
-        .values.tolist()
-
-    return X_train, y_train
-
-
-def analysis():
-    # Fit the SVC (exclude the last column).
-    X_train, y_train = build_data_set()
-    clf = svm.SVC(kernel="rbf", C=8, gamma=0.3)
-    clf.fit(X_train[:-1], y_train[:-1])
-
-    # Now we get the actual data from which we want to generate predictions.
-    data = pd.DataFrame.from_csv("forward_sample_NO_NA.csv")
-    data = data.replace("N/A", 0).replace("NaN", 0)
-
-    X_test = preprocessing.scale(np.array(data[FEATURES].values))
-    Z = data["Ticker"].values.tolist()
-    invest_list = []
-
-    # If our SVM predicts outperformance, append that stock to an invest_list.
-    for i in range(len(X_test)):
-        p = clf.predict(X_test[i])[0]
-        if p:
-            invest_list.append(Z[i])
-
-    return invest_list
-
-# Run the analysis multiple times (in this case, eight), and print the results
-# which have turned up more than 2/3 of the time. This code is very inelegant.
-
-final_list = []
-loops = 8
-
-while loops:
-    stock_list = analysis()
-    for e in stock_list:
-        final_list.append(e)
-    loops -= 1
-
-x = Counter(final_list)
-
-print(30 * "_")
-for each_prediction in x:
-    # If the stock was predicted 2/3 of the time, append it.
-    if x[each_prediction] > loops - (loops / 3):
-        print(each_prediction)
diff --git a/stock_prediction.py b/stock_prediction.py
new file mode 100644
index 00000000..9ba3df19
--- /dev/null
+++ b/stock_prediction.py
@@ -0,0 +1,49 @@
+import pandas as pd
+from sklearn.ensemble import RandomForestClassifier
+from utils import data_string_to_float, status_calc
+
+
+def build_data_set():
+    """
+    Reads the keystats.csv file and prepares it for scikit-learn
+    :return: X_train and y_train numpy arrays
+    """
+    training_data = pd.read_csv("keystats.csv", index_col='Date')
+    training_data.dropna(axis=0, how='any', inplace=True)
+    features = training_data.columns[6:]
+
+    X_train = training_data[features].values
+    # Generate the labels: '1' if a stock beats the S&P500 by more than 10%, else '0'.
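+    # status_calc (imported from utils.py) uses a 10% outperformance threshold by default.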
+    y_train = list(map(
+        status_calc, training_data["stock_p_change"], training_data["SP500_p_change"]))
+
+    return X_train, y_train
+
+
+def predict_stocks():
+    X_train, y_train = build_data_set()
+    # Remove the random_state parameter to generate actual predictions
+    clf = RandomForestClassifier(n_estimators=100, random_state=0)
+    clf.fit(X_train, y_train)
+
+    # Now we get the actual data from which we want to generate predictions.
+    data = pd.read_csv('forward_sample.csv', index_col='Date')
+    data.dropna(axis=0, how='any', inplace=True)
+    features = data.columns[6:]
+    X_test = data[features].values
+    z = data["Ticker"].values
+
+    # Get the predicted tickers
+    y_pred = clf.predict(X_test)
+    if sum(y_pred) == 0:
+        print("No stocks predicted!")
+    else:
+        invest_list = z[y_pred].tolist()
+        print(
+            f"{len(invest_list)} stocks predicted to outperform the S&P500 by more than 10%:")
+        print(' '.join(invest_list))
+        return invest_list
+
+
+if __name__ == '__main__':
+    predict_stocks()
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
diff --git a/tests/test_datasets.py b/tests/test_datasets.py
new file mode 100644
index 00000000..412af03d
--- /dev/null
+++ b/tests/test_datasets.py
@@ -0,0 +1,97 @@
+import pytest
+import os
+import pandas as pd
+
+import parsing_keystats
+import stock_prediction
+import download_historical_prices
+import current_data
+import utils
+
+
+def test_forward_sample_dimensions():
+    """
+    Check that the forward sample has been built correctly
+    """
+    # Number of features + ['Date', 'Unix', 'Ticker', 'Price', 'stock_p_change', 'SP500', 'SP500_p_change']
+    df = pd.read_csv('forward_sample.csv')
+    indexing_columns = ['Date', 'Unix', 'Ticker', 'Price',
+                        'stock_p_change', 'SP500', 'SP500_p_change']
+    n_cols = len(df.columns)
+    assert n_cols == len(current_data.features) + len(indexing_columns)
+    assert len(df) == len(os.listdir('forward/'))
+    indexing_columns.remove('Ticker')
+    # Make sure that all of the indexing columns only contain zeroes.
+    assert df[indexing_columns].sum().sum() == 0
+
+
+def test_forward_sample_data():
+    """
+    Some quick checks on the forward sample data
+    """
+    df = pd.read_csv('forward_sample.csv')
+    # For these tests we need to fill in NaN values with zero.
+    df.fillna(0, inplace=True)
+
+    # Make sure that these features have positive values.
+    positive_features = ['Market Cap', 'Price/Sales', 'Revenue', 'Revenue Per Share', 'Total Cash',
+                         'Total Cash Per Share', 'Total Debt', '50-Day Moving Average', '200-Day Moving Average',
+                         'Avg Vol (3 month)', 'Shares Outstanding', 'Float',
+                         '% Held by Insiders', '% Held by Institutions', 'Shares Short',
+                         'Short Ratio', 'Short % of Float', 'Shares Short (prior month)']
+    assert (df[positive_features] >= 0).all().all()
+
+    # Make sure that these features have values less than 100 (the above checks that they are +ve).
+    fractional_features = ['% Held by Insiders', '% Held by Institutions',
+                           'Short Ratio', 'Short % of Float']
+    assert (df[fractional_features] <= 100).all().all()
+
+
+def test_stock_prices_dataset():
+    """
+    Check that data from pandas-datareader has been downloaded correctly
+    """
+    df = pd.read_csv("stock_prices.csv", index_col='Date', parse_dates=True)
+    assert type(df.index) == pd.core.indexes.datetimes.DatetimeIndex
+    # Make sure that all columns have some price data.
+    assert all(df.isnull().sum() < len(df))
+    # After this, we fill in missing values with zero for test purposes.
+    df.fillna(0, inplace=True)
+    assert (df >= 0).all().all()
+
+    # Index prices
+    index_df = pd.read_csv(
+        "sp500_index.csv", index_col='Date', parse_dates=True)
+    assert type(index_df.index) == pd.core.indexes.datetimes.DatetimeIndex
+    assert len(index_df.columns) == 6
+    assert index_df.shape[0] == df.shape[0]
+    assert index_df.isnull().sum().sum() == 0
+
+
+def test_keystats_dimensions():
+    """
+    This tests that the keystats csv has been built correctly
+    """
+    df = pd.read_csv("keystats.csv", index_col='Date')
+
+    indexing_columns = ['Unix', 'Ticker', 'Price',
+                        'stock_p_change', 'SP500', 'SP500_p_change']
+    n_cols = len(df.columns)
+    assert n_cols == len(parsing_keystats.features) + len(indexing_columns)
+
+    # No missing data in the index columns.
+    assert df[indexing_columns].isnull().sum().sum() == 0
+
+
+def test_stock_prediction_dataset():
+    """
+    This tests that the dataset on which we are training our algorithm has been correctly built
+    """
+    df = pd.read_csv("keystats.csv", index_col='Date')
+    num_rows_with_nan = sum(df.isnull().sum(axis=1) > 0)
+
+    X, y = stock_prediction.build_data_set()
+    assert X.shape[0] == df.shape[0] - num_rows_with_nan
+    assert len(y) == df.shape[0] - num_rows_with_nan
+    assert X.shape[1] == len(parsing_keystats.features)
diff --git a/tests/test_utils.py b/tests/test_utils.py
new file mode 100644
index 00000000..8016c5bf
--- /dev/null
+++ b/tests/test_utils.py
@@ -0,0 +1,45 @@
+import pytest
+import utils
+
+
+def test_status_calc():
+    """
+    Test the status_calc function which generates training labels
+    """
+    assert utils.status_calc(50, 20, 12.2) == 1
+    assert utils.status_calc(12.003, 10, 15) == 0
+    assert utils.status_calc(-10, -30, 5) == 1
+    assert utils.status_calc(-31, -30, 15) == 0
+    assert utils.status_calc(15, 5, 10) == 1
+
+    with pytest.raises(ValueError):
+        utils.status_calc(12, 10, -3)
+
+
+def test_data_string_to_float():
+    """
+    data_string_to_float() is a function that needs to meet lots of empirical requirements
+    owing to the idiosyncrasies of Yahoo Finance's HTML. The main jobs are parsing negatives and
+    abbreviations of big numbers.
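+    The cases below cover the K/M/B suffixes, negative values, zeroes, and malformed input.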
+ """ + assert utils.data_string_to_float("asdfNaN") == "N/A" + assert utils.data_string_to_float(">N/A\n0") == 0 + assert utils.data_string_to_float("-3") == -3 + assert utils.data_string_to_float("4K") == 4000 + assert utils.data_string_to_float("2M") == 2000000 + assert utils.data_string_to_float("0.07B") == 70000000 + assert utils.data_string_to_float("-100.1K") == -100100 + assert utils.data_string_to_float("-0.1M") == -100000 + assert utils.data_string_to_float("-0.02B") == -20000000 + assert utils.data_string_to_float("-0.00") == 0 + assert utils.data_string_to_float("0.00") == 0 + assert utils.data_string_to_float("0M") == 0 + assert utils.data_string_to_float("010K") == 10000 + + with pytest.raises(ValueError): + utils.data_string_to_float(">0x") + with pytest.raises(ValueError): + utils.data_string_to_float("10k") + with pytest.raises(ValueError): + utils.data_string_to_float("2KB") diff --git a/tests/test_variables.py b/tests/test_variables.py new file mode 100644 index 00000000..38263479 --- /dev/null +++ b/tests/test_variables.py @@ -0,0 +1,21 @@ +import pytest +import os +import parsing_keystats +import current_data +import download_historical_prices + + +def test_statspath(): + # Check that the statspath exists and is a directory + assert os.path.exists(current_data.statspath) + assert os.path.isdir(current_data.statspath) + + assert parsing_keystats.statspath == current_data.statspath + + +def test_features_same(): + # There are only four differences (intentionally) + assert set(parsing_keystats.features) - set(current_data.features) == {'Qtrly Revenue Growth', 'Qtrly Earnings Growth', + 'Shares Short (as of', 'Net Income Avl to Common'} + assert set(current_data.features) - set(parsing_keystats.features) == {'Net Income Avi to Common', 'Quarterly Earnings Growth', + 'Shares Short', 'Quarterly Revenue Growth'} diff --git a/utils.py b/utils.py new file mode 100644 index 00000000..2e0a235f --- /dev/null +++ b/utils.py @@ -0,0 +1,57 @@ +def data_string_to_float(number_string): + """ + The result of our regex search is a number stored as a string, but we need a float. + - Some of these strings say things like '25M' instead of 25000000. + - Some have 'N/A' in them. + - Some are negative (have '-' in front of the numbers). + - As an artifact of our regex, some values which were meant to be zero are instead '>0'. + We must process all of these cases accordingly. + :param number_string: the string output of our regex, which needs to be converted to a float. + :return: a float representation of the string, taking into account minus sign, unit, etc. + """ + # Deal with zeroes and the sign + if ("N/A" in number_string) or ("NaN" in number_string): + return "N/A" + elif number_string == ">0": + return 0 + elif "B" in number_string: + return float(number_string.replace("B", '')) * 1000000000 + elif "M" in number_string: + return float(number_string.replace("M", '')) * 1000000 + elif "K" in number_string: + return float(number_string.replace("K", '')) * 1000 + else: + return float(number_string) + + +def duplicate_error_check(df): + """ + A common symptom of failed parsing is when there are consecutive duplicate values. This function was used + to find the duplicates and tweak the regex. Any remaining duplicates are probably coincidences. + :param df: the dataframe to be checked + :return: Prints out a list of the rows containing duplicates, as well as the duplicated values. + """ + # Some columns often (correctly) have the same value as other columns. Remove these. 
+    df.drop(['Unix', 'Price', 'stock_p_change', 'SP500', 'SP500_p_change', 'Float', '200-Day Moving Average', 'Short Ratio',
+             'Operating Margin'], axis=1, inplace=True)
+
+    for i in range(len(df)):
+        # Check if there are any consecutive duplicates in this row.
+        if pd.Series(df.iloc[i] == df.iloc[i].shift()).any():
+            duplicates = set([x for x in list(df.iloc[i])
+                              if list(df.iloc[i]).count(x) > 1])
+            # A duplicate value of zero is quite common. We want other duplicates.
+            if duplicates != {0}:
+                print(i, df.iloc[i], duplicates, sep="\n")
+
+
+def status_calc(stock, sp500, outperformance=10):
+    """A simple function to classify whether a stock outperformed the S&P500
+    :param stock: the stock's percentage price change over the period
+    :param sp500: the S&P500's percentage price change over the same period
+    :param outperformance: the stock is classified 1 if its change beats the S&P500's change by at least this many points
+    :return: true/false
+    """
+    if outperformance < 0:
+        raise ValueError("outperformance must be non-negative")
+    return stock - sp500 >= outperformance
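
As a quick, illustrative sanity check of the two helpers in `utils.py` (this snippet is a sketch rather than part of the diff, and assumes only that `utils.py` is importable from the working directory):

```python
from utils import data_string_to_float, status_calc

# '2.5M' is the Yahoo Finance abbreviation for 2,500,000.
assert data_string_to_float("2.5M") == 2500000.0

# A stock that gained 25% while the S&P500 gained 5% clears the default
# 10-point outperformance threshold, so it is labelled True (outperform).
assert status_calc(25, 5) is True

# A 7-point gap falls short of the threshold.
assert status_calc(12, 5) is False
```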