From fa76439278b4f14b9f51ceb99c00f77e7dda6b90 Mon Sep 17 00:00:00 2001 From: saeedamen Date: Mon, 1 Jan 2024 21:13:34 +0000 Subject: [PATCH] Upgraded to FinancePy 0.310 --- README.md | 5 +- finmarketpy/backtest/backtestengine.py | 1700 +++++++++++------ .../curve/volatility/fxoptionspricer.py | 230 ++- finmarketpy/curve/volatility/fxvolsurface.py | 245 ++- finmarketpy/util/marketconstants.py | 28 +- finmarketpy/util/marketutil.py | 14 +- .../fx_forwards_indices_examples.py | 212 +- .../fx_forwards_pricing_examples.py | 23 +- .../fx_options_indices_examples.py | 328 ++-- .../fx_options_pricing_examples.py | 298 +-- .../fx_spot_indices_examples.py | 151 +- .../fx_vol_surface_animation.py | 45 +- .../fx_vol_surface_interpolation_examples.py | 151 +- finmarketpy_examples/quandl_examples.py | 43 +- finmarketpy_examples/returns_examples.py | 40 +- finmarketpy_examples/seasonality_examples.py | 101 +- finmarketpy_examples/technicals_example.py | 39 +- .../tradingmodelfxtrend_bbg_example.py | 103 +- .../tradingmodelfxtrend_example.py | 99 +- setup.py | 44 +- 20 files changed, 2419 insertions(+), 1480 deletions(-) diff --git a/README.md b/README.md index 1ab4224..9ee7bee 100644 --- a/README.md +++ b/README.md @@ -161,7 +161,7 @@ One possible way to fix this is to delete the `__pycache__` folders underneath w Eg. if you are using the `py38class` environment, if you've installed Anaconda in `C:\Anaconda3`, you might find the financepy folder at the below location -`C:\Anaconda3\envs\py38\Lib\site-packages\financepy` +`C:\Anaconda3\envs\py38class\Lib\site-packages\financepy` # finmarketpy examples @@ -187,6 +187,9 @@ In finmarketpy/examples you will find several examples, including some simple tr # finmarketpy log +* 01 Jan 2024 + * Helper code to reduce boiler plate code for TradingModel + * Upgraded to FinancePy 0.310 and refactored FXVolSurface * 26 Apr 2023 * Changed sklearn to scikit-learn dependency * 05 Apr 2022 diff --git a/finmarketpy/backtest/backtestengine.py b/finmarketpy/backtest/backtestengine.py index 0b8b816..8ae7dc1 100644 --- a/finmarketpy/backtest/backtestengine.py +++ b/finmarketpy/backtest/backtestengine.py @@ -21,8 +21,10 @@ from findatapy.util import LoggerManager import pickle -#import zlib -#import lz4framed # conda install -c conda-forge py-lz4framed +# import zlib +# import lz4framed # conda install -c conda-forge py-lz4framed + +from typing import List, Dict # Make blosc optional (only when trying to run backtests in parallel) try: @@ -32,10 +34,14 @@ import pickle +from finmarketpy.backtest.backtestrequest import BacktestRequest + market_constants = MarketConstants() + class Backtest(object): - """Conducts backtest for strategies trading assets. Assumes we have an input of total returns. Reports historical return statistics + """Conducts backtest for strategies trading assets. Assumes we have an + input of total returns. Reports historical return statistics and returns time series. """ @@ -45,11 +51,18 @@ def __init__(self): self._portfolio = None return - def calculate_diagnostic_trading_PnL(self, asset_a_df, signal_df, further_df=[], further_df_labels=[]): + def calculate_diagnostic_trading_PnL( + self, + asset_a_df: pd.DataFrame, + signal_df: pd.DataFrame, + further_df: pd.DataFrame = [], + further_df_labels: List[str] = []) -> pd.DataFrame: """Calculates P&L table which can be used for debugging purposes. - The table is populated with asset, signal and further dataframes provided by the user, can be used to check signalling methodology. 
-        It does not apply parameters such as transaction costs, vol adjusment and so on.
+        The table is populated with asset, signal and further dataframes
+        provided by the user, and can be used to check signalling methodology.
+        It does not apply parameters such as transaction costs, vol adjustment
+        and so on.

         Parameters
         ----------
@@ -60,19 +73,22 @@ def calculate_diagnostic_trading_PnL(self, asset_a_df, signal_df, further_df=[],
         Trade signals (typically +1, -1, 0 etc)

         further_df : DataFrame
-            Further dataframes user wishes to output in the diagnostic output (typically inputs for the signals)
+            Further dataframes user wishes to output in the diagnostic output
+            (typically inputs for the signals)

         further_df_labels
             Labels to append to the further dataframes

         Returns
         -------
-        DataFrame with asset, trading signals and returns of the trading strategy for diagnostic purposes
+        DataFrame with asset, trading signals and returns of the trading
+        strategy for diagnostic purposes
         """
         calculations = Calculations()

         asset_rets_df = calculations.calculate_returns(asset_a_df)
-        strategy_rets = calculations.calculate_signal_returns(signal_df, asset_rets_df)
+        strategy_rets = calculations.calculate_signal_returns(signal_df,
+                                                              asset_rets_df)

         reset_points = ((signal_df - signal_df.shift(1)).abs())

@@ -80,26 +96,39 @@ def calculate_diagnostic_trading_PnL(self, asset_a_df, signal_df, further_df=[],
         asset_a_df_entry[reset_points == 0] = np.nan
         asset_a_df_entry = asset_a_df_entry.ffill()

-        asset_a_df_entry.columns = [x + '_entry' for x in asset_a_df_entry.columns]
-        asset_rets_df.columns = [x + '_asset_rets' for x in asset_rets_df.columns]
-        strategy_rets.columns = [x + '_strat_rets' for x in strategy_rets.columns]
+        asset_a_df_entry.columns = [x + '_entry' for x in
+                                    asset_a_df_entry.columns]
+        asset_rets_df.columns = [x + '_asset_rets' for x in
+                                 asset_rets_df.columns]
+        strategy_rets.columns = [x + '_strat_rets' for x in
+                                 strategy_rets.columns]
         signal_df.columns = [x + '_final_signal' for x in signal_df.columns]

         for i in range(0, len(further_df)):
-            further_df[i].columns = [x + '_' + further_df_labels[i] for x in further_df[i].columns]
+            further_df[i].columns = [x + '_' + further_df_labels[i] for x in
+                                     further_df[i].columns]

-        flatten_df = [asset_a_df, asset_a_df_entry, asset_rets_df, strategy_rets, signal_df]
+        flatten_df = [asset_a_df, asset_a_df_entry, asset_rets_df,
+                      strategy_rets, signal_df]

         for f in further_df:
             flatten_df.append(f)

         return calculations.join(flatten_df, how='outer')

-    def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, run_in_parallel):
-        """Calculates P&L of a trading strategy and statistics to be retrieved later
-
-        Calculates the P&L for each asset/signal combination and also for the finally strategy applying appropriate
-        weighting in the portfolio, depending on predefined parameters, for example:
+    def calculate_trading_PnL(
+            self,
+            br: BacktestRequest,
+            asset_a_df: pd.DataFrame,
+            signal_df: pd.DataFrame,
+            contract_value_df: pd.DataFrame,
+            run_in_parallel: bool = False):
+        """Calculates P&L of a trading strategy and statistics to be retrieved
+        later
+
+        Calculates the P&L for each asset/signal combination and also for the
+        final strategy applying appropriate weighting in the portfolio,
+        depending on predefined parameters, for example:
             static weighting for each asset
             static weighting for each asset + vol weighting for each asset
             static weighting for each asset + vol weighting for each asset + vol weighting for the portfolio
@@ -137,19 +166,25 @@ def
calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru logger.info("Calculating trading P&L...") signal_df = signal_df.shift(br.signal_delay) - asset_df, signal_df = calculations.join_left_fill_right(asset_a_df, signal_df) + asset_df, signal_df = calculations.join_left_fill_right(asset_a_df, + signal_df) if (contract_value_df is not None): - asset_df, contract_value_df = asset_df.align(contract_value_df, join='left', axis='index') + asset_df, contract_value_df = asset_df.align(contract_value_df, + join='left', + axis='index') contract_value_df = contract_value_df.fillna( - method='ffill') # fill down asset holidays (we won't trade on these days) + method='ffill') # fill down asset holidays (we won't trade + # on these days) - # Non-trading days of the assets (this may of course vary between the assets we are trading + # Non-trading days of the assets (this may of course vary between the + # assets we are trading # if they are from different asset classes) non_trading_days = np.isnan(asset_df.values) # Only allow signals to change on the days when we can trade assets - signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals + signal_df = signal_df.mask( + non_trading_days) # fill asset holidays with NaN signals signal_df = signal_df.fillna(method='ffill') # fill these down # Transaction costs and roll costs @@ -164,7 +199,9 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru for i in range(0, len(asset_df_cols)): pnl_cols.append(asset_df_cols[i] + " / " + signal_cols[i]) - asset_df = asset_df.fillna(method='ffill') # fill down asset holidays (we won't trade on these days) + # Fill down asset holidays (we won't trade on these days) + asset_df = asset_df.fillna( + method='ffill') returns_df = calculations.calculate_returns(asset_df) # Apply a stop loss/take profit to every trade if this has been specified @@ -173,17 +210,25 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru returns_df = calculations.calculate_returns(asset_df) # Makes assumption that signal column order matches that of returns - temp_strategy_rets_df = calculations.calculate_signal_returns_as_matrix(signal_df, returns_df) + temp_strategy_rets_df = calculations.calculate_signal_returns_as_matrix( + signal_df, returns_df) - trade_rets_df = calculations.calculate_cum_rets_trades(signal_df, temp_strategy_rets_df) + trade_rets_df = calculations.calculate_cum_rets_trades(signal_df, + temp_strategy_rets_df) # pre_signal_df = signal_df.copy() - signal_df = calculations.calculate_risk_stop_signals(signal_df, trade_rets_df, br.stop_loss, br.take_profit) + signal_df = calculations.calculate_risk_stop_signals(signal_df, + trade_rets_df, + br.stop_loss, + br.take_profit) - # make sure we can't trade where asset price is undefined and carry over signal - signal_df = signal_df.mask(non_trading_days) # fill asset holidays with NaN signals - signal_df = signal_df.fillna(method='ffill') # fill these down (when asset is not trading + # Make sure we can't trade where asset price is undefined and + # carry over signal + signal_df = signal_df.mask( + non_trading_days) # fill asset holidays with NaN signals + signal_df = signal_df.fillna( + method='ffill') # fill these down (when asset is not trading # for debugging purposes # if True: @@ -220,45 +265,59 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru length_cols = len(signal_df.columns) position_clip_adjustment_matrix = np.transpose( - 
np.repeat(position_clip_adjustment.values.flatten()[np.newaxis, :], length_cols, 0)) + np.repeat( + position_clip_adjustment.values.flatten()[np.newaxis, :], + length_cols, 0)) # Recalculate portfolio signals after adjustment (for individual components - without # weighting each signal separately) portfolio_signal_before_weighting = pd.DataFrame( - data=(portfolio_signal_before_weighting.values * position_clip_adjustment_matrix), + data=( + portfolio_signal_before_weighting.values * position_clip_adjustment_matrix), index=portfolio_signal_before_weighting.index, columns=portfolio_signal_before_weighting.columns) - # Recalculate portfolio signal after adjustment (for portfolio level positions) + # Recalculate portfolio signal after adjustment (for portfolio + # level positions) portfolio_signal = pd.DataFrame( - data=(portfolio_signal.values * position_clip_adjustment_matrix), + data=( + portfolio_signal.values * position_clip_adjustment_matrix), index=portfolio_signal.index, columns=portfolio_signal.columns) - # Recalculate portfolio leverage with position constraint (multiply vectors elementwise) + # Recalculate portfolio leverage with position constraint + # (multiply vectors elementwise) portfolio_leverage_df = pd.DataFrame( - data=(portfolio_leverage_df.values * position_clip_adjustment.values), + data=( + portfolio_leverage_df.values * position_clip_adjustment.values), index=portfolio_leverage_df.index, columns=portfolio_leverage_df.columns) - # Recalculate total long, short, net and absolute exposures of the whole portfolio after the position + # Recalculate total long, short, net and absolute exposures of the + # whole portfolio after the position # clip adjustment portfolio_total_longs, portfolio_total_shorts, portfolio_net_exposure, portfolio_total_exposure \ = self.calculate_exposures(portfolio_signal) # Calculate final portfolio returns with the amended portfolio leverage (by default just 1s) - portfolio = calculations.calculate_signal_returns_with_tc_matrix(portfolio_leverage_df, portfolio, tc=tc, rc=rc) + portfolio = calculations.calculate_signal_returns_with_tc_matrix( + portfolio_leverage_df, portfolio, tc=tc, rc=rc) # Assign all the property variables # Trim them if we have asked for a different plot start/finish - self._signal = self._filter_by_plot_start_finish_date(signal_df, br) # individual signals (before portfolio leverage) - self._portfolio_signal = self._filter_by_plot_start_finish_date(portfolio_signal, br) # individual signals (AFTER portfolio leverage/constraints) - self._portfolio_leverage = self._filter_by_plot_start_finish_date(portfolio_leverage_df, br) # leverage on portfolio + self._signal = self._filter_by_plot_start_finish_date(signal_df, + br) # individual signals (before portfolio leverage) + self._portfolio_signal = self._filter_by_plot_start_finish_date( + portfolio_signal, + br) # individual signals (AFTER portfolio leverage/constraints) + self._portfolio_leverage = self._filter_by_plot_start_finish_date( + portfolio_leverage_df, br) # leverage on portfolio self._portfolio = self._filter_by_plot_start_finish_date(portfolio, br) # Calculate each period of trades - self._portfolio_trade = self._portfolio_signal - self._portfolio_signal.shift(1) + self._portfolio_trade = self._portfolio_signal - self._portfolio_signal.shift( + 1) # Expressing trades/positions in terms of notionals self._portfolio_signal_notional = None @@ -268,10 +327,14 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru self._portfolio_signal_contracts 
= None self._portfolio_signal_trade_contracts = None - self._portfolio_total_longs = self._filter_by_plot_start_finish_date(portfolio_total_longs, br) - self._portfolio_total_shorts = self._filter_by_plot_start_finish_date(portfolio_total_shorts, br) - self._portfolio_net_exposure = self._filter_by_plot_start_finish_date(portfolio_net_exposure, br) - self._portfolio_total_exposure = self._filter_by_plot_start_finish_date(portfolio_total_exposure, br) + self._portfolio_total_longs = self._filter_by_plot_start_finish_date( + portfolio_total_longs, br) + self._portfolio_total_shorts = self._filter_by_plot_start_finish_date( + portfolio_total_shorts, br) + self._portfolio_net_exposure = self._filter_by_plot_start_finish_date( + portfolio_net_exposure, br) + self._portfolio_total_exposure = self._filter_by_plot_start_finish_date( + portfolio_total_exposure, br) self._portfolio_total_longs_notional = None self._portfolio_total_shorts_notional = None @@ -279,14 +342,19 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru self._portfolio_total_exposure_notional = None self._portfolio_signal_trade_notional_sizes = None - # Individual signals P&L (before portfolio volatility targeting, position limits etc) + # Individual signals P&L (before portfolio volatility targeting, + # position limits etc) self._pnl = pnl - self._individual_leverage = self._filter_by_plot_start_finish_date(individual_leverage_df, br) + self._individual_leverage = self._filter_by_plot_start_finish_date( + individual_leverage_df, br) - # P&L components of individual assets after all the portfolio level risk signals and position limits have been applied - self._components_pnl = self._filter_by_plot_start_finish_date(calculations.calculate_signal_returns_with_tc_matrix(portfolio_signal_before_weighting, - returns_df, tc=tc, rc=rc), br) + # P&L components of individual assets after all the portfolio level + # risk signals and position limits have been applied + self._components_pnl = self._filter_by_plot_start_finish_date( + calculations.calculate_signal_returns_with_tc_matrix( + portfolio_signal_before_weighting, + returns_df, tc=tc, rc=rc), br) self._components_pnl.columns = pnl_cols # TODO FIX very slow - hence only calculate on demand @@ -299,9 +367,13 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru self._portfolio.columns = ['Port'] - self._pnl_ret_stats = RetStats(self._pnl, br.ann_factor, br.resample_ann_factor) - self._components_pnl_ret_stats = RetStats(self._components_pnl, br.ann_factor, br.resample_ann_factor) - self._portfolio_ret_stats = RetStats(self._portfolio, br.ann_factor, br.resample_ann_factor) + self._pnl_ret_stats = RetStats(self._pnl, br.ann_factor, + br.resample_ann_factor) + self._components_pnl_ret_stats = RetStats(self._components_pnl, + br.ann_factor, + br.resample_ann_factor) + self._portfolio_ret_stats = RetStats(self._portfolio, br.ann_factor, + br.resample_ann_factor) # Also create other measures of portfolio # * portfolio & trades in terms of a predefined notional (in USD) @@ -311,14 +383,17 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru # Express positions in terms of the notional size specified self._portfolio_signal_notional = self._portfolio_signal * br.portfolio_notional_size self._portfolio_signal_trade_notional = self._portfolio_signal_notional \ - - self._portfolio_signal_notional.shift(1) + - self._portfolio_signal_notional.shift( + 1) df_trades_sizes = pd.DataFrame() - 
rounded_portfolio_signal_trade_notional = self._portfolio_signal_trade_notional.round(2) + rounded_portfolio_signal_trade_notional = self._portfolio_signal_trade_notional.round( + 2) for k in rounded_portfolio_signal_trade_notional.columns: - df_trades_sizes[k] = pd.value_counts(rounded_portfolio_signal_trade_notional[k], sort=True) + df_trades_sizes[k] = pd.value_counts( + rounded_portfolio_signal_trade_notional[k], sort=True) df_trades_sizes = df_trades_sizes[df_trades_sizes.index != 0] @@ -331,39 +406,50 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru # Get the positions in terms of the contract sizes notional_copy = self._portfolio_signal_notional.copy(deep=True) - notional_copy_cols = [x.split('.')[0] for x in notional_copy.columns] - notional_copy_cols = [x + '.contract-value' for x in notional_copy_cols] + notional_copy_cols = [x.split('.')[0] for x in + notional_copy.columns] + notional_copy_cols = [x + '.contract-value' for x in + notional_copy_cols] notional_copy.columns = notional_copy_cols # Can only give contract sizes if these are defined if contract_value_df is not None: contract_value_df = contract_value_df[notional_copy_cols] - notional_df, contract_value_df = notional_copy.align(contract_value_df, join='left', axis='index') + notional_df, contract_value_df = notional_copy.align( + contract_value_df, join='left', axis='index') # Careful make sure orders of magnitude are same for the notional and the contract value self._portfolio_signal_contracts = notional_df / contract_value_df self._portfolio_signal_contracts.columns = self._portfolio_signal_notional.columns self._portfolio_signal_trade_contracts = self._portfolio_signal_contracts \ - - self._portfolio_signal_contracts.shift(1) + - self._portfolio_signal_contracts.shift( + 1) # TODO parallel version still work in progress! 
- logger.info("Cumulative index calculations") - if False: # market_constants.backtest_thread_no[market_constants.generic_plat] > 1 and run_in_parallel: - swim_pool = SwimPool(multiprocessing_library=market_constants.multiprocessing_library) + if False: # market_constants.backtest_thread_no[market_constants.generic_plat] > 1 and run_in_parallel: + swim_pool = SwimPool( + multiprocessing_library=market_constants.multiprocessing_library) - pool = swim_pool.create_pool(thread_technique=market_constants.backtest_thread_technique, - thread_no=market_constants.backtest_thread_no[market_constants.generic_plat]) + pool = swim_pool.create_pool( + thread_technique=market_constants.backtest_thread_technique, + thread_no=market_constants.backtest_thread_no[ + market_constants.generic_plat]) r1 = pool.apply_async(self._pnl_ret_stats.calculate_ret_stats) - r2 = pool.apply_async(self._components_pnl_ret_stats.calculate_ret_stats) - r3 = pool.apply_async(self._portfolio_ret_stats.calculate_ret_stats) - - resultsA = pool.apply_async(calculations.create_mult_index, args=(self._pnl,)) - resultsB = pool.apply_async(calculations.create_mult_index, args=(self._components_pnl,)) - resultsC = pool.apply_async(calculations.create_mult_index, args=(self._portfolio,)) + r2 = pool.apply_async( + self._components_pnl_ret_stats.calculate_ret_stats) + r3 = pool.apply_async( + self._portfolio_ret_stats.calculate_ret_stats) + + resultsA = pool.apply_async(calculations.create_mult_index, + args=(self._pnl,)) + resultsB = pool.apply_async(calculations.create_mult_index, + args=(self._components_pnl,)) + resultsC = pool.apply_async(calculations.create_mult_index, + args=(self._portfolio,)) swim_pool.close_pool(pool) @@ -376,10 +462,12 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru self._portfolio_cum = resultsC.get() else: - # Calculate return statistics of the each asset/signal after signal leverage (but before portfolio level constraints) + # Calculate return statistics of the each asset/signal after signal + # leverage (but before portfolio level constraints) # self._ret_stats_pnl.calculate_ret_stats() - # Calculate return statistics of the each asset/signal after signal leverage AND after portfolio level constraints + # Calculate return statistics of the each asset/signal after signal + # leverage AND after portfolio level constraints # self._ret_stats_pnl_components.calculate_ret_stats() # Calculate return statistics of the final portfolio @@ -387,22 +475,28 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru # Calculate final portfolio cumulative P&L if br.cum_index == 'mult': - # Calculate individual signals cumulative P&L after signal leverage but before portfolio level constraints + # Calculate individual signals cumulative P&L after signal + # leverage but before portfolio level constraints self._pnl_cum = calculations.create_mult_index(self._pnl) - # Calculate individual signals cumulative P&L after signal leverage AND after portfolio level constraints - self._components_pnl_cum = calculations.create_mult_index(self._components_pnl) + # Calculate individual signals cumulative P&L after signal + # leverage AND after portfolio level constraints + self._components_pnl_cum = calculations.create_mult_index( + self._components_pnl) - self._portfolio_cum = calculations.create_mult_index(self._portfolio) # portfolio cumulative P&L + self._portfolio_cum = calculations.create_mult_index( + self._portfolio) # portfolio cumulative P&L elif br.cum_index == 
'add': # Calculate individual signals cumulative P&L after signal leverage but before portfolio level constraints self._pnl_cum = calculations.create_add_index(self._pnl) # Calculate individual signals cumulative P&L after signal leverage AND after portfolio level constraints - self._components_pnl_cum = calculations.create_add_index(self._components_pnl) + self._components_pnl_cum = calculations.create_add_index( + self._components_pnl) - self._portfolio_cum = calculations.create_add_index(self._portfolio) # portfolio cumulative P&L + self._portfolio_cum = calculations.create_add_index( + self._portfolio) # portfolio cumulative P&L logger.info("Completed cumulative index calculations") @@ -410,7 +504,10 @@ def calculate_trading_PnL(self, br, asset_a_df, signal_df, contract_value_df, ru self._components_pnl_cum.columns = pnl_cols self._portfolio_cum.columns = ['Port'] - def _filter_by_plot_start_finish_date(self, df, br): + def _filter_by_plot_start_finish_date( + self, + df: pd.DataFrame, + br: BacktestRequest) -> pd.DataFrame: if br.plot_start is None and br.plot_finish is None: return df @@ -427,15 +524,20 @@ def _filter_by_plot_start_finish_date(self, df, br): if br.plot_finish is not None: plot_finish = br.plot_finish - return filter.filter_time_series_by_date(plot_start, plot_finish, df) + return filter.filter_time_series_by_date(plot_start, plot_finish, + df) - def calculate_exposures(self, portfolio_signal): - """Calculates time series for the total longs, short, net and absolute exposure on an aggregated portfolio basis. + def calculate_exposures(self, + portfolio_signal: pd.DataFrame) -> List[ + pd.DataFrame]: + """Calculates time series for the total longs, short, net and absolute + exposure on an aggregated portfolio basis. Parameters ---------- portfolio_signal : DataFrame - Signals for each asset in the portfolio after all weighting, portfolio & signal level volatility adjustments + Signals for each asset in the portfolio after all weighting, + portfolio & signal level volatility adjustments Returns ------- @@ -444,8 +546,10 @@ def calculate_exposures(self, portfolio_signal): """ # Calculate total portfolio longs/total portfolio shorts/total portfolio exposure - portfolio_total_longs = pd.DataFrame(portfolio_signal[portfolio_signal > 0].sum(axis=1)) - portfolio_total_shorts = pd.DataFrame(portfolio_signal[portfolio_signal < 0].sum(axis=1)) + portfolio_total_longs = pd.DataFrame( + portfolio_signal[portfolio_signal > 0].sum(axis=1)) + portfolio_total_shorts = pd.DataFrame( + portfolio_signal[portfolio_signal < 0].sum(axis=1)) portfolio_total_longs.columns = ['Total Longs'] portfolio_total_shorts.columns = ['Total Shorts'] @@ -465,7 +569,7 @@ def backtest_output(self): return ### Get PnL of individual assets before portfolio constraints - def pnl(self): + def pnl(self) -> pd.DataFrame: """Gets P&L returns of all the individual sub_components of the model (before any portfolio level leverage is applied) Returns @@ -474,7 +578,7 @@ def pnl(self): """ return self._pnl - def trade_no(self): + def trade_no(self) -> pd.DataFrame: """Gets number of trades for each signal in the backtest (before Returns @@ -488,7 +592,7 @@ def trade_no(self): return self._trade_no - def pnl_trades(self): + def pnl_trades(self) -> pd.DataFrame: """Gets P&L of each individual trade per signal Returns @@ -498,11 +602,12 @@ def pnl_trades(self): if self._pnl_trades is None: calculations = Calculations() - self._pnl_trades = calculations.calculate_individual_trade_gains(self._signal, self._pnl) + 
self._pnl_trades = calculations.calculate_individual_trade_gains( + self._signal, self._pnl) return self._pnl_trades - def pnl_desc(self): + def pnl_desc(self) -> pd.DataFrame: """Gets P&L return statistics in a string format Returns @@ -511,7 +616,7 @@ def pnl_desc(self): """ return self._ret_stats_signals.summary() - def pnl_ret_stats(self): + def pnl_ret_stats(self) -> pd.DataFrame: """Gets P&L return statistics of individual strategies as class to be queried Returns @@ -521,7 +626,7 @@ def pnl_ret_stats(self): return self._pnl_ret_stats - def pnl_cum(self): + def pnl_cum(self) -> pd.DataFrame: """Gets P&L as a cumulative time series of individual assets Returns @@ -532,7 +637,7 @@ def pnl_cum(self): return self._pnl_cum ### Get PnL of individual assets AFTER portfolio constraints - def components_pnl(self): + def components_pnl(self) -> pd.DataFrame: """Gets P&L returns of all the individual subcomponents of the model (after portfolio level leverage is applied) Returns @@ -541,7 +646,7 @@ def components_pnl(self): """ return self._components_pnl - def components_pnl_trades(self): + def components_pnl_trades(self) -> pd.DataFrame: """Gets P&L of each individual trade per signal Returns @@ -551,12 +656,13 @@ def components_pnl_trades(self): if self._components_pnl_trades is None: calculations = Calculations() - self._components_pnl_trades = calculations.calculate_individual_trade_gains(self._signal, - self._components_pnl) + self._components_pnl_trades = calculations.calculate_individual_trade_gains( + self._signal, + self._components_pnl) return self._components_pnl_trades - def components_pnl_desc(self): + def components_pnl_desc(self) -> pd.DataFrame: """Gets P&L of individual components as return statistics in a string format Returns @@ -565,7 +671,7 @@ def components_pnl_desc(self): """ # return self._ret_stats_signals.summary() - def components_pnl_ret_stats(self): + def components_pnl_ret_stats(self) -> pd.DataFrame: """Gets P&L return statistics of individual strategies as class to be queried Returns @@ -575,7 +681,7 @@ def components_pnl_ret_stats(self): return self._components_pnl_ret_stats - def components_pnl_cum(self): + def components_pnl_cum(self) -> pd.DataFrame: """Gets P&L as a cumulative time series of individual assets (after portfolio level leverage adjustments) Returns @@ -587,7 +693,7 @@ def components_pnl_cum(self): ### Get PnL of the final portfolio - def portfolio_cum(self): + def portfolio_cum(self) -> pd.DataFrame: """Gets P&L as a cumulative time series of portfolio Returns @@ -597,7 +703,7 @@ def portfolio_cum(self): return self._portfolio_cum - def portfolio_pnl(self): + def portfolio_pnl(self) -> pd.DataFrame: """Gets portfolio returns in raw form (ie. 
not indexed into cumulative form) Returns @@ -607,7 +713,7 @@ def portfolio_pnl(self): return self._portfolio - def portfolio_pnl_desc(self): + def portfolio_pnl_desc(self) -> pd.DataFrame: """Gets P&L return statistics of portfolio as string Returns @@ -617,7 +723,7 @@ def portfolio_pnl_desc(self): return self._portfolio_ret_stats.summary() - def portfolio_pnl_ret_stats(self): + def portfolio_pnl_ret_stats(self) -> pd.DataFrame: """Gets P&L return statistics of portfolio as class to be queried Returns @@ -627,7 +733,7 @@ def portfolio_pnl_ret_stats(self): return self._portfolio_ret_stats - def individual_leverage(self): + def individual_leverage(self) -> pd.DataFrame: """Gets leverage for each asset historically Returns @@ -637,7 +743,7 @@ def individual_leverage(self): return self._individual_leverage - def portfolio_leverage(self): + def portfolio_leverage(self) -> pd.DataFrame: """Gets the leverage for the portfolio Returns @@ -647,7 +753,7 @@ def portfolio_leverage(self): return self._portfolio_leverage - def portfolio_trade_no(self): + def portfolio_trade_no(self) -> pd.DataFrame: """Gets number of trades for each signal in the backtest (after both signal and portfolio level vol adjustment) Returns @@ -657,11 +763,12 @@ def portfolio_trade_no(self): if self._portfolio_trade_no is None: calculations = Calculations() - self._portfolio_trade_no = calculations.calculate_trade_no(self._portfolio_signal) + self._portfolio_trade_no = calculations.calculate_trade_no( + self._portfolio_signal) return self._portfolio_trade_no - def portfolio_signal(self): + def portfolio_signal(self) -> pd.DataFrame: """Gets the signals (with individual leverage & portfolio leverage) for each asset, which equates to what we would trade in practice @@ -672,7 +779,7 @@ def portfolio_signal(self): return self._portfolio_signal - def portfolio_total_longs(self): + def portfolio_total_longs(self) -> pd.DataFrame: """Gets the total long exposure in the portfolio Returns @@ -682,7 +789,7 @@ def portfolio_total_longs(self): return self._portfolio_total_longs - def portfolio_total_shorts(self): + def portfolio_total_shorts(self) -> pd.DataFrame: """Gets the total short exposure in the portfolio Returns @@ -692,7 +799,7 @@ def portfolio_total_shorts(self): return self._portfolio_total_shorts - def portfolio_net_exposure(self): + def portfolio_net_exposure(self) -> pd.DataFrame: """Gets the total net exposure of the portfolio Returns @@ -702,7 +809,7 @@ def portfolio_net_exposure(self): return self._portfolio_net_exposure - def portfolio_total_exposure(self): + def portfolio_total_exposure(self) -> pd.DataFrame: """Gets the total absolute exposure of the portfolio Returns @@ -712,7 +819,7 @@ def portfolio_total_exposure(self): return self._portfolio_total_exposure - def portfolio_total_longs_notional(self): + def portfolio_total_longs_notional(self) -> pd.DataFrame: """Gets the total long exposure in the portfolio scaled by notional Returns @@ -722,7 +829,7 @@ def portfolio_total_longs_notional(self): return self._portfolio_total_longs_notional - def portfolio_total_shorts_notional(self): + def portfolio_total_shorts_notional(self) -> pd.DataFrame: """Gets the total short exposure in the portfolio scaled by notional Returns @@ -732,7 +839,7 @@ def portfolio_total_shorts_notional(self): return self._portfolio_total_shorts_notional - def portfolio_net_exposure_notional(self): + def portfolio_net_exposure_notional(self) -> pd.DataFrame: """Gets the total net exposure of the portfolio scaled by notional Returns @@ -742,7 
+849,7 @@ def portfolio_net_exposure_notional(self): return self._portfolio_net_exposure_notional - def portfolio_total_exposure_notional(self): + def portfolio_total_exposure_notional(self) -> pd.DataFrame: """Gets the total absolute exposure of the portfolio scaled by notional Returns @@ -752,7 +859,7 @@ def portfolio_total_exposure_notional(self): return self._portfolio_total_exposure_notional - def portfolio_trade(self): + def portfolio_trade(self) -> pd.DataFrame: """Gets the trades (with individual leverage & portfolio leverage) for each asset, which we'd need to execute @@ -763,7 +870,7 @@ def portfolio_trade(self): return self._portfolio_trade - def portfolio_signal_notional(self): + def portfolio_signal_notional(self) -> pd.DataFrame: """Gets the signals (with individual leverage & portfolio leverage) for each asset, which equates to what we would have a positions in practice, scaled by a notional amount we have already specified @@ -774,7 +881,7 @@ def portfolio_signal_notional(self): return self._portfolio_signal_notional - def portfolio_trade_notional(self): + def portfolio_trade_notional(self) -> pd.DataFrame: """Gets the trades (with individual leverage & portfolio leverage) for each asset, which we'd need to execute, scaled by a notional amount we have already specified @@ -785,7 +892,7 @@ def portfolio_trade_notional(self): return self._portfolio_signal_trade_notional - def portfolio_trade_notional_sizes(self): + def portfolio_trade_notional_sizes(self) -> pd.DataFrame: """Gets the number of trades (with individual leverage & portfolio leverage) for each asset, which we'd need to execute, scaled by a notional amount we have already specified @@ -796,7 +903,7 @@ def portfolio_trade_notional_sizes(self): return self._portfolio_signal_trade_notional_sizes - def portfolio_signal_contracts(self): + def portfolio_signal_contracts(self) -> pd.DataFrame: """Gets the signals (with individual leverage & portfolio leverage) for each asset, which equates to what we would have a positions in practice, scaled by a notional amount and into contract sizes (eg. for futures) which we need to specify in another dataframe @@ -808,7 +915,7 @@ def portfolio_signal_contracts(self): return self._portfolio_signal_contracts - def portfolio_trade_contracts(self): + def portfolio_trade_contracts(self) -> pd.DataFrame: """Gets the trades (with individual leverage & portfolio leverage) for each asset, which we'd need to execute, scaled by a notional amount we have already specified and into contract sizes (eg. for futures) which we need to specify in another dataframe @@ -820,7 +927,7 @@ def portfolio_trade_contracts(self): return self._portfolio_signal_trade_contracts - def signal(self): + def signal(self) -> pd.DataFrame: """Gets signal for each asset (with individual leverage, but excluding portfolio leverage constraints) for each asset Returns @@ -842,6 +949,7 @@ def signal(self): from finmarketpy.economics import TechParams from findatapy.timeseries import Calculations, RetStats, Filter + class TradingModel(object): """Abstract class which wraps around Backtest, providing convenient functions for analaysis. Implement your own subclasses of this for your own strategy. 
See tradingmodelfxtrend_example.py for a simple implementation of a @@ -866,11 +974,11 @@ class TradingModel(object): # logger = LoggerManager().getLogger(__name__) def __init__(self): - pass + self.br = self.load_parameters() # To be implemented by every trading strategy @abc.abstractmethod - def load_parameters(self, br=None): + def load_parameters(self, br: BacktestRequest = None): """Fills parameters for the backtest, such as start-end dates, transaction costs etc. To be implemented by subclass. Can overwrite it with our own BacktestRequest. """ @@ -884,7 +992,12 @@ def load_assets(self, br=None): return @abc.abstractmethod - def construct_signal(self, spot_df, spot_df2, tech_params, br, run_in_parallel=False): + def construct_signal(self, + spot_df: pd.DataFrame = None, + spot_df2: pd.DataFrame = None, + tech_params: TechParams = None, + br: BacktestRequest = None, + run_in_parallel: bool = False) -> pd.DataFrame: """Constructs signal from pre-loaded time series Parameters @@ -923,16 +1036,17 @@ def load_model(path): return pkl ####### Generic functions for every backtest - def construct_strategy(self, br=None, run_in_parallel=False): + def construct_strategy(self, br: BacktestRequest = None): """Constructs the returns for all the strategies which have been specified. - It gets backtesting parameters from fill_backtest_request (although these can be overwritten - and then market data from fill_assets + It gets backtesting parameters from fill_backtest_request (although + these can be overwritten and then market data from fill_assets Parameters ---------- br : BacktestRequest - Parameters which define the backtest (for example start date, end date, transaction costs etc. + Parameters which define the backtest (for example start date, + end date, transaction costs etc. """ logger = LoggerManager().getLogger(__name__) @@ -951,17 +1065,56 @@ def construct_strategy(self, br=None, run_in_parallel=False): except: market_data = self.load_assets() - asset_df = market_data[0] - spot_df = market_data[1] - spot_df2 = market_data[2] - basket_dict = market_data[3] - # contract_value_df = market_data[4] - - # optional database output + asset_df = None + spot_df = None + spot_df2 = None + basket_dict = {} contract_value_df = None - if len(market_data) == 5: - contract_value_df = market_data[4] + if isinstance(market_data, tuple) or isinstance(market_data, list): + asset_df = market_data[0] + spot_df = market_data[1] + spot_df2 = market_data[2] + basket_dict = market_data[3] + + # optional database output + + if len(market_data) == 5: + contract_value_df = market_data[4] + + elif isinstance(market_data, dict): + if "asset_df" in market_data: + asset_df = market_data["asset_df"] + + if "spot_df" in market_data: + spot_df = market_data["spot_df"] + else: + spot_df = market_data["asset_df"] + + if "spot_df2" in market_data: + spot_df2 = market_data["spot_df2"] + + if "basket_dict" in market_data: + basket_dict = market_data["basket_dict"] + else: + try: + final_strategy = self.FINAL_STRATEGY + except: + final_strategy = "Basket" + + tickers = [] + + for tick in asset_df.columns: + if "." 
in tick: + tick = tick.split(".")[0] + + basket_dict[tick] = [tick] + tickers.append(tick) + + basket_dict[final_strategy] = tickers + + if "contract_value_df" in market_data: + contract_value_df = market_data["contract_value_df"] if hasattr(br, 'tech_params'): tech_params = br.tech_params @@ -979,16 +1132,20 @@ def construct_strategy(self, br=None, run_in_parallel=False): bask_keys = basket_dict.keys() # Each portfolio key calculate returns - can put parts of the portfolio in the key - if market_constants.backtest_thread_no[market_constants.generic_plat] > 1 and run_in_parallel: - swim_pool = SwimPool(multiprocessing_library=market_constants.multiprocessing_library) + if market_constants.backtest_thread_no[ + market_constants.generic_plat] > 1 and run_in_parallel: + swim_pool = SwimPool( + multiprocessing_library=market_constants.multiprocessing_library) - pool = swim_pool.create_pool(thread_technique=market_constants.backtest_thread_technique, - thread_no=market_constants.backtest_thread_no[market_constants.generic_plat]) + pool = swim_pool.create_pool( + thread_technique=market_constants.backtest_thread_technique, + thread_no=market_constants.backtest_thread_no[ + market_constants.generic_plat]) mult_results = [] - #start = asset_df.index[0] - #finish = asset_df.index[-1] + # start = asset_df.index[0] + # finish = asset_df.index[-1] # calculate sub substrategies in sub-processes # TODO cut up in time chunks @@ -997,13 +1154,18 @@ def construct_strategy(self, br=None, run_in_parallel=False): if key != self.FINAL_STRATEGY: logger.info("Calculating (parallel) " + key) - asset_cut_df = asset_df[[x + '.' + br.trading_field for x in basket_dict[key]]] - spot_cut_df = spot_df[[x + '.' + br.trading_field for x in basket_dict[key]]] + asset_cut_df = asset_df[ + [x + "." + br.trading_field for x in basket_dict[key]]] + spot_cut_df = spot_df[ + [x + "." + br.trading_field for x in basket_dict[key]]] mult_results.append( - pool.apply_async(self.construct_individual_strategy, args=(br, spot_cut_df, spot_df2, asset_cut_df, - tech_params, key, contract_value_df, - False, True,)) + pool.apply_async(self.construct_individual_strategy, + args=(br, spot_cut_df, spot_df2, + asset_cut_df, + tech_params, key, + contract_value_df, + False, True,)) ) # Calculate final strategy separately in my main process (so don't have issues with pickling back large output) @@ -1011,16 +1173,18 @@ def construct_strategy(self, br=None, run_in_parallel=False): logger.info("Calculating final strategy " + self.FINAL_STRATEGY) # Calculate the final strategy separately (can often be a lot larger) - asset_cut_df = asset_df[[x + '.' + br.trading_field for x in basket_dict[self.FINAL_STRATEGY]]] - spot_cut_df = spot_df[[x + '.' + br.trading_field for x in basket_dict[self.FINAL_STRATEGY]]] + asset_cut_df = asset_df[[x + "." + br.trading_field for x in + basket_dict[self.FINAL_STRATEGY]]] + spot_cut_df = spot_df[[x + "." 
+ br.trading_field for x in + basket_dict[self.FINAL_STRATEGY]]] desc, results, leverage, stats, key, backtest = \ - self.construct_individual_strategy(br, spot_cut_df, spot_df2, - asset_cut_df, - tech_params, - self.FINAL_STRATEGY, - contract_value_df, True, - False) + self.construct_individual_strategy(br, spot_cut_df, spot_df2, + asset_cut_df, + tech_params, + self.FINAL_STRATEGY, + contract_value_df, True, + False) results.columns = desc @@ -1052,15 +1216,19 @@ def construct_strategy(self, br=None, run_in_parallel=False): for key in bask_keys: logger.info("Calculating (single thread) " + key) - asset_cut_df = asset_df[[x + '.' + br.trading_field for x in basket_dict[key]]] - spot_cut_df = spot_df[[x + '.' + br.trading_field for x in basket_dict[key]]] + asset_cut_df = asset_df[ + [x + "." + br.trading_field for x in basket_dict[key]]] + spot_cut_df = spot_df[ + [x + "." + br.trading_field for x in basket_dict[key]]] desc, results, leverage, ret_stats, key, backtest = \ - self.construct_individual_strategy(br, spot_cut_df, spot_df2, asset_cut_df, - tech_params, key, - contract_value_df, False, False) + self.construct_individual_strategy(br, spot_cut_df, + spot_df2, asset_cut_df, + tech_params, key, + contract_value_df, + False, False) - #results = backtest.portfolio_cum() + # results = backtest.portfolio_cum() results.columns = desc cum_results[results.columns[0]] = results @@ -1073,13 +1241,16 @@ def construct_strategy(self, br=None, run_in_parallel=False): # Get benchmark for comparison benchmark = self.construct_strategy_benchmark() - cum_results_benchmark = self.compare_strategy_vs_benchmark(br, cum_results, benchmark) + cum_results_benchmark = self.compare_strategy_vs_benchmark(br, + cum_results, + benchmark) self._strategy_group_benchmark_pnl_ret_stats = ret_stats_results try: ret_stats_list = ret_stats_results - ret_stats_list['Benchmark'] = (self._strategy_benchmark_pnl_ret_stats) + ret_stats_list["Benchmark"] = ( + self._strategy_benchmark_pnl_ret_stats) self._strategy_group_benchmark_pnl_ret_stats = ret_stats_list except: pass @@ -1091,7 +1262,9 @@ def construct_strategy(self, br=None, run_in_parallel=False): self._strategy_group_leverage = port_leverage self._strategy_group_benchmark_pnl = cum_results_benchmark - def _assign_final_strategy_results(self, results, backtest): + def _assign_final_strategy_results(self, + results: pd.DataFrame, + backtest: Backtest): # For a key, designated as the final strategy save that as the "strategy" # backtest.pnl_cum() @@ -1137,8 +1310,16 @@ def _assign_final_strategy_results(self, results, backtest): self._strategy_net_exposure_notional = backtest.portfolio_net_exposure_notional() self._strategy_total_exposure_notional = backtest.portfolio_total_exposure_notional() - def construct_individual_strategy(self, br, spot_df, spot_df2, asset_df, tech_params, key, contract_value_df, - run_in_parallel, compress_output): + def construct_individual_strategy( + self, br: BacktestRequest, + spot_df: pd.DataFrame, + spot_df2: pd.DataFrame, + asset_df: pd.DataFrame, + tech_params: TechParams, + key: str, + contract_value_df: pd.DataFrame, + run_in_parallel: bool, + compress_output: bool): """Combines the signal with asset returns to find the returns of an individual strategy Parameters @@ -1169,7 +1350,9 @@ def construct_individual_strategy(self, br, spot_df, spot_df2, asset_df, tech_pa logger.info("Calculating trading signals for " + key + "...") - signal = self.construct_signal(spot_df, spot_df2, tech_params, br, run_in_parallel=run_in_parallel) + 
signal = self.construct_signal(spot_df=spot_df, spot_df2=spot_df2, + tech_params=tech_params, br=br, + run_in_parallel=run_in_parallel) logger.info("Calculated trading signals for " + key) @@ -1177,7 +1360,8 @@ def construct_individual_strategy(self, br, spot_df, spot_df2, asset_df, tech_pa contract_value_df, run_in_parallel) # calculate P&L (and adjust signals for vol etc) - if br.write_csv: backtest.pnl_cum().to_csv(self.DUMP_CSV + key + ".csv") + if br.write_csv: backtest.pnl_cum().to_csv( + self.DUMP_CSV + key + ".csv") if br.calc_stats: desc = [key + ' ' + str(backtest.portfolio_pnl_desc()[0])] @@ -1204,7 +1388,11 @@ def construct_individual_strategy(self, br, spot_df, spot_df2, asset_df, tech_pa # return desc, backtest.portfolio_cum(), backtest.portfolio_leverage(), backtest.portfolio_pnl_ret_stats(), \ # key, _ - def compare_strategy_vs_benchmark(self, br, strategy_df, benchmark_df): + def compare_strategy_vs_benchmark( + self, + br: BacktestRequest, + strategy_df: pd.DataFrame, + benchmark_df: pd.DataFrame): """Compares the trading strategy we are backtesting against a benchmark Parameters @@ -1225,33 +1413,41 @@ def compare_strategy_vs_benchmark(self, br, strategy_df, benchmark_df): # Align strategy time series with that of benchmark benchmark_df.columns = [x + ' be' for x in benchmark_df.columns] - strategy_df, benchmark_df = strategy_df.align(benchmark_df, join='left', axis=0) + strategy_df, benchmark_df = strategy_df.align(benchmark_df, + join='left', axis=0) # If necessary apply vol target to benchmark (to make it comparable with strategy) if br.portfolio_vol_adjust is True: - benchmark_df = risk_engine.calculate_vol_adjusted_index_from_prices(benchmark_df, br=br) + benchmark_df = risk_engine.calculate_vol_adjusted_index_from_prices( + benchmark_df, br=br) # Only calculate return statistics if this has been specified (note when different frequencies of data # might underrepresent vol # if calc_stats: benchmark_df = benchmark_df.fillna(method='ffill') - benchmark_df = self._filter_by_plot_start_finish_date(benchmark_df, br) + benchmark_df = self._filter_by_plot_start_finish_date(benchmark_df, + br) - ret_stats.calculate_ret_stats_from_prices(benchmark_df, br.ann_factor) + ret_stats.calculate_ret_stats_from_prices(benchmark_df, + br.ann_factor) if br.calc_stats: benchmark_df.columns = ret_stats.summary() # Realign strategy & benchmark strategy_benchmark_df = strategy_df.join(benchmark_df, how='inner') - strategy_benchmark_df = strategy_benchmark_df.fillna(method='ffill') + strategy_benchmark_df = strategy_benchmark_df.fillna( + method='ffill') - strategy_benchmark_df = self._filter_by_plot_start_finish_date(strategy_benchmark_df, br) + strategy_benchmark_df = self._filter_by_plot_start_finish_date( + strategy_benchmark_df, br) if br.cum_index == 'mult': - strategy_benchmark_df = calculations.create_mult_index_from_prices(strategy_benchmark_df) + strategy_benchmark_df = calculations.create_mult_index_from_prices( + strategy_benchmark_df) elif br.cum_index == 'add': - strategy_benchmark_df = calculations.create_add_index_from_prices(strategy_benchmark_df) + strategy_benchmark_df = calculations.create_add_index_from_prices( + strategy_benchmark_df) self._strategy_benchmark_pnl = benchmark_df self._strategy_benchmark_pnl_ret_stats = ret_stats @@ -1260,7 +1456,10 @@ def compare_strategy_vs_benchmark(self, br, strategy_df, benchmark_df): return strategy_df - def _filter_by_plot_start_finish_date(self, df, br): + def _filter_by_plot_start_finish_date( + self, + df: pd.DataFrame, + br: 
BacktestRequest) -> pd.DataFrame: if br.plot_start is None and br.plot_finish is None: return df else: @@ -1274,9 +1473,10 @@ def _filter_by_plot_start_finish_date(self, df, br): if br.plot_finish is not None: plot_finish = br.plot_finish - return filter.filter_time_series_by_date(plot_start, plot_finish, df) + return filter.filter_time_series_by_date(plot_start, plot_finish, + df) - def _flatten_list(self, list_of_lists): + def _flatten_list(self, list_of_lists: List[str]) -> List[str]: """Flattens list, particularly useful for combining baskets Parameters @@ -1300,107 +1500,110 @@ def _flatten_list(self, list_of_lists): return result - def strategy_name(self): + def strategy_name(self) -> str: return self.FINAL_STRATEGY - def individual_leverage(self): + def individual_leverage(self) -> pd.DataFrame: return self._individual_leverage - def strategy_group_pnl_trades(self): + def strategy_group_pnl_trades(self) -> pd.DataFrame: return self._strategy_group_pnl_trades ### components (after signal and portfolio weighting) - def strategy_components_pnl(self): + def strategy_components_pnl(self) -> pd.DataFrame: return self._strategy_components_pnl - def strategy_components_pnl_ret_stats(self): + def strategy_components_pnl_ret_stats(self) -> pd.DataFrame: return self._strategy_components_pnl_ret_stats ### Final strategy - def strategy_pnl(self): + def strategy_pnl(self) -> pd.DataFrame: return self._strategy_pnl - def strategy_pnl_ret_stats(self): + def strategy_pnl_ret_stats(self) -> pd.DataFrame: return self._strategy_pnl_ret_stats - def strategy_leverage(self): + def strategy_leverage(self) -> pd.DataFrame: return self._strategy_leverage ### Final PNL strategy + benchmark - def strategy_benchmark_pnl(self): + def strategy_benchmark_pnl(self) -> pd.DataFrame: return self._strategy_benchmark_pnl - def strategy_benchmark_pnl_ret_stats(self): + def strategy_benchmark_pnl_ret_stats(self) -> pd.DataFrame: return self._strategy_benchmark_pnl_ret_stats ### Final PNL + group - def strategy_group_pnl(self): + def strategy_group_pnl(self) -> pd.DataFrame: return self._strategy_group_pnl - def strategy_group_pnl_ret_stats(self): + def strategy_group_pnl_ret_stats(self) -> pd.DataFrame: return self._strategy_group_pnl_ret_stats ### Final P&L + group + benchmark - def strategy_group_benchmark_pnl(self): + def strategy_group_benchmark_pnl(self) -> pd.DataFrame: return self._strategy_group_benchmark_pnl - def strategy_group_benchmark_pnl_ret_stats(self): + def strategy_group_benchmark_pnl_ret_stats(self) -> pd.DataFrame: return self._strategy_group_benchmark_pnl_ret_stats - def strategy_group_leverage(self): + def strategy_group_leverage(self) -> pd.DataFrame: return self._strategy_group_leverage # Signals - def strategy_signal(self): + def strategy_signal(self) -> pd.DataFrame: return self._strategy_signal - def strategy_trade(self): + def strategy_trade(self) -> pd.DataFrame: return self._strategy_trade - def strategy_signal_notional(self): + def strategy_signal_notional(self) -> pd.DataFrame: return self._strategy_signal_notional - def strategy_trade_notional(self): + def strategy_trade_notional(self) -> pd.DataFrame: return self._strategy_trade_notional - def strategy_trade_notional_sizes(self): + def strategy_trade_notional_sizes(self) -> pd.DataFrame: return self._strategy_trade_notional_sizes - def strategy_signal_contracts(self): + def strategy_signal_contracts(self) -> pd.DataFrame: return self._strategy_signal_contracts - def strategy_trade_contracts(self): + def strategy_trade_contracts(self) -> 
pd.DataFrame: return self._strategy_trade_contracts - def strategy_total_longs(self): + def strategy_total_longs(self) -> pd.DataFrame: return self._strategy_total_longs - def strategy_total_shorts(self): + def strategy_total_shorts(self) -> pd.DataFrame: return self._strategy_total_shorts - def strategy_net_exposure(self): + def strategy_net_exposure(self) -> pd.DataFrame: return self._strategy_net_exposure - def strategy_total_exposure(self): + def strategy_total_exposure(self) -> pd.DataFrame: return self._strategy_total_exposure - def strategy_total_longs_notional(self): + def strategy_total_longs_notional(self) -> pd.DataFrame: return self._strategy_total_longs_notional - def strategy_total_shorts_notional(self): + def strategy_total_shorts_notional(self) -> pd.DataFrame: return self._strategy_total_shorts_notional - def strategy_net_exposure_notional(self): + def strategy_net_exposure_notional(self) -> pd.DataFrame: return self._strategy_net_exposure_notional - def strategy_total_exposure_notional(self): + def strategy_total_exposure_notional(self) -> pd.DataFrame: return self._strategy_total_exposure_notional #### Plotting #### Plotting - def _reduce_plot(self, data_frame, reduce_plot=True, resample='B'): + def _reduce_plot(self, + data_frame: pd.DataFrame, + reduce_plot: bool = True, + resample: str = "B") -> pd.DataFrame: """Reduces the frequency of a time series to every business day so it can be plotted more easily Parameters @@ -1422,7 +1625,14 @@ def _reduce_plot(self, data_frame, reduce_plot=True, resample='B'): except: return data_frame - def _chart_return_with_df(self, df, style, silent_plot, chart_type='line', ret_with_df=False, split_on_char=None): + def _chart_return_with_df( + self, + df: pd.DataFrame, + style: Style, + silent_plot: bool, + chart_type: str = "line", + ret_with_df: bool = False, + split_on_char: str = None): if split_on_char is not None: d_split = [] @@ -1435,7 +1645,8 @@ def _chart_return_with_df(self, df, style, silent_plot, chart_type='line', ret_w df.columns = d_split - chart = Chart(df, engine=self.DEFAULT_PLOT_ENGINE, chart_type=chart_type, style=style) + chart = Chart(df, engine=self.DEFAULT_PLOT_ENGINE, + chart_type=chart_type, style=style) if not (silent_plot): chart.plot() if ret_with_df: return chart, df @@ -1444,24 +1655,43 @@ def _chart_return_with_df(self, df, style, silent_plot, chart_type='line', ret_w ##### Quick helper functions to plot aspects of the strategy such as P&L, leverage etc. 
- def plot_individual_leverage(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', ret_with_df=False, - split_on_char=None): + def plot_individual_leverage( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): - style = self._create_style("Individual Leverage", "Individual Leverage", reduce_plot=reduce_plot) + style = self._create_style("Individual Leverage", + "Individual Leverage", + reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._individual_leverage, - reduce_plot=reduce_plot, resample=resample), strip) - - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(self._individual_leverage, + reduce_plot=reduce_plot, resample=resample), + strip) + + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_group_pnl_trades(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): + def plot_strategy_group_pnl_trades( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): - style = self._create_style("(bp)", "Individual Trade PnL", reduce_plot=reduce_plot) + style = self._create_style("(bp)", "Individual Trade PnL", + reduce_plot=reduce_plot) # zero when there isn't a trade exit # strategy_pnl_trades = self._strategy_pnl_trades * 100 * 100 @@ -1469,55 +1699,84 @@ def plot_strategy_group_pnl_trades(self, strip=None, silent_plot=False, reduce_p # note only works with single large basket trade try: - strategy_pnl_trades = self._strategy_group_pnl_trades.fillna(0) * 100 * 100 - df = self._strip_dataframe(self._reduce_plot(strategy_pnl_trades, reduce_plot=reduce_plot, - resample=resample), strip) - - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, - split_on_char=split_on_char) + strategy_pnl_trades = self._strategy_group_pnl_trades.fillna( + 0) * 100 * 100 + df = self._strip_dataframe(self._reduce_plot( + strategy_pnl_trades, reduce_plot=reduce_plot, + resample=resample), strip) + + return self._chart_return_with_df( + df, style, silent_plot, chart_type='line', + ret_with_df=ret_with_df, + split_on_char=split_on_char) except: pass - def plot_strategy_pnl(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', ret_with_df=False, - split_on_char=None): + def plot_strategy_pnl( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): style = self._create_style("", "Strategy PnL", reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._strategy_pnl, - reduce_plot=reduce_plot, resample=resample), strip) + df = self._strip_dataframe(self._reduce_plot( + self._strategy_pnl, reduce_plot=reduce_plot, + resample=resample), strip) if hasattr(self, 'br'): if self.br.write_csv_pnl: - df.to_csv(self.DUMP_PATH + self.FINAL_STRATEGY + "_pnl.csv") + df.to_csv( + self.DUMP_PATH + self.FINAL_STRATEGY + "_pnl.csv") - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, - split_on_char=split_on_char) + return self._chart_return_with_df( 
+ df, style, silent_plot, chart_type="line", + ret_with_df=ret_with_df, + split_on_char=split_on_char) except Exception as e: print(str(e)) - def plot_strategy_trade_no(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', ret_with_df=False, - split_on_char=None): + def plot_strategy_trade_no( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): df_trades = self._strategy_trade_no - if strip is not None: df_trades.index = [k.replace(strip, '') for k in df_trades.index] + if strip is not None: df_trades.index = [k.replace(strip, '') + for k in df_trades.index] style = self._create_style("", "", reduce_plot=reduce_plot) try: - style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade no).png' - style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade no).html' + style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy trade no).png" + style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy trade no).html" - df = self._strip_dataframe(self._reduce_plot(df_trades, reduce_plot=reduce_plot, resample=resample), strip) + df = self._strip_dataframe(self._reduce_plot( + df_trades, reduce_plot=reduce_plot, resample=resample), strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='bar', ret_with_df=ret_with_df, - split_on_char=split_on_char) + return self._chart_return_with_df( + df, style, silent_plot, chart_type='bar', + ret_with_df=ret_with_df, + split_on_char=split_on_char) except: pass - def plot_strategy_signal_proportion(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, - split_on_char=None): + def plot_strategy_signal_proportion( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): signal = self._strategy_signal @@ -1528,95 +1787,157 @@ def plot_strategy_signal_proportion(self, strip=None, silent_plot=False, reduce_ df = pd.DataFrame(index=long.index, columns=['Long', 'Short', 'Flat']) - df['Long'] = long - df['Short'] = short - df['Flat'] = flat + df["Long"] = long + df["Short"] = short + df["Flat"] = flat - if strip is not None: df.index = [k.replace(strip, '') for k in df.index] + if strip is not None: df.index = [k.replace(strip, '') for k in + df.index] style = self._create_style("", "") try: - style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy signal proportion).png' - style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy signal proportion).html' + style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy signal proportion).png" + style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy signal proportion).html" - df = self._strip_dataframe(self._reduce_plot(df), strip, reduce_plot=reduce_plot, - resample=resample) + df = self._strip_dataframe( + self._reduce_plot(df), strip, reduce_plot=reduce_plot, + resample=resample) - return self._chart_return_with_df(df, style, silent_plot, chart_type='bar', ret_with_df=ret_with_df, - split_on_char=split_on_char) + return self._chart_return_with_df( + df, style, silent_plot, chart_type="bar", + ret_with_df=ret_with_df, + split_on_char=split_on_char) except: pass - def plot_strategy_leverage(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', ret_with_df=False, - 
split_on_char=None): - style = self._create_style("Portfolio Leverage", "Strategy Leverage", reduce_plot=reduce_plot) + def plot_strategy_leverage( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): + style = self._create_style("Portfolio Leverage", "Strategy Leverage", + reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._strategy_leverage, - reduce_plot=reduce_plot, resample=resample), strip) - - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(self._strategy_leverage, + reduce_plot=reduce_plot, resample=resample), + strip) + + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass ##### Plot the individual components cumulative returns and return statistics (including portfolio level constraints) - def plot_strategy_components_pnl(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): + def plot_strategy_components_pnl( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): - style = self._create_style("Ind Components", "Strategy PnL Components", reduce_plot=reduce_plot) + style = self._create_style("Ind Components", "Strategy PnL Components", + reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._strategy_components_pnl, - reduce_plot=reduce_plot, resample=resample), strip) - - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(self._strategy_components_pnl, + reduce_plot=reduce_plot, resample=resample), + strip) + + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_components_pnl_ir(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_components_pnl_ret_stats, 'IR', 'Ind Component', - 'Ind Component IR', - strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_components_pnl_returns(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_components_pnl_ret_stats, 'Returns', 'Ind Component (%)', - 'Ind Component Returns', strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_components_pnl_vol(self, strip=None, silent_plot=False, ret_with_df=False, split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_components_pnl_ret_stats, 'Vol', 'Ind Component (%)', - 'Ind Component Vol', strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_components_pnl_drawdowns(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_components_pnl_ret_stats, 'Drawdowns', 'Ind Component (%)', - 'Ind Component Drawdowns', strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def 
plot_strategy_components_pnl_yoy(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - - return self.plot_yoy_helper(self._strategy_components_pnl_ret_stats, 'Ind Component YoY', 'Ind Component (%)', - strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, + def plot_strategy_components_pnl_ir( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_components_pnl_ret_stats, 'IR', 'Ind Component', + 'Ind Component IR', + strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_components_pnl_returns( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_components_pnl_ret_stats, 'Returns', + 'Ind Component (%)', + 'Ind Component Returns', strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_components_pnl_vol( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_components_pnl_ret_stats, 'Vol', + 'Ind Component (%)', + 'Ind Component Vol', strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_components_pnl_drawdowns( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_components_pnl_ret_stats, 'Drawdowns', + 'Ind Component (%)', + 'Ind Component Drawdowns', strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_components_pnl_yoy( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + + return self.plot_yoy_helper(self._strategy_components_pnl_ret_stats, + 'Ind Component YoY', 'Ind Component (%)', + strip=strip, silent_plot=silent_plot, + ret_with_df=ret_with_df, split_on_char=split_on_char) ##### plot the cumulative returns and return statistics for the strategy group ##### this will plot the final strategy, benchmark and all the individual baskets (as though they were run by themselves) - def plot_strategy_group_benchmark_pnl(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): + def plot_strategy_group_benchmark_pnl( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): logger = LoggerManager().getLogger(__name__) style = self._create_style("", "Group Benchmark PnL - cumulative") @@ -1626,54 +1947,78 @@ def plot_strategy_group_benchmark_pnl(self, strip=None, silent_plot=False, reduc for line in strat_list: logger.info(line) # plot cumulative line of returns - df = self._strip_dataframe(self._reduce_plot(self._strategy_group_benchmark_pnl, - reduce_plot=reduce_plot, resample=resample), strip) - - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_group_benchmark_pnl_ir(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return 
self._plot_ret_stats_helper(self._strategy_group_benchmark_pnl_ret_stats, 'IR', '', - 'Group Benchmark IR', - strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_group_benchmark_pnl_returns(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_group_benchmark_pnl_ret_stats, 'Returns', '(%)', - 'Group Benchmark Returns', - strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_group_benchmark_pnl_vol(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_group_benchmark_pnl_ret_stats, 'Vol', '(%)', - 'Group Benchmark Vol', - strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_strategy_group_benchmark_pnl_drawdowns(self, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): - return self._plot_ret_stats_helper(self._strategy_group_benchmark_pnl_ret_stats, 'Drawdowns', '(%)', - 'Group Benchmark Drawdowns', strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def _plot_ret_stats_helper(self, ret_stats, metric, title, file_tag, strip=None, silent_plot=False, + df = self._strip_dataframe( + self._reduce_plot(self._strategy_group_benchmark_pnl, + reduce_plot=reduce_plot, resample=resample), + strip) + + return self._chart_return_with_df( + df, style, silent_plot, chart_type="line", ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_group_benchmark_pnl_ir( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_group_benchmark_pnl_ret_stats, "IR", "", + 'Group Benchmark IR', + strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_group_benchmark_pnl_returns( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_group_benchmark_pnl_ret_stats, "Returns", '(%)', + "Group Benchmark Returns", + strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_group_benchmark_pnl_vol( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_group_benchmark_pnl_ret_stats, 'Vol', '(%)', + 'Group Benchmark Vol', + strip=strip, silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_strategy_group_benchmark_pnl_drawdowns( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + return self._plot_ret_stats_helper( + self._strategy_group_benchmark_pnl_ret_stats, 'Drawdowns', '(%)', + 'Group Benchmark Drawdowns', strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def _plot_ret_stats_helper(self, ret_stats, metric, title, file_tag, + strip=None, silent_plot=False, ret_with_df=False, split_on_char=None): style = self._create_style(title, file_tag) keys = ret_stats.keys() ret_metric = [] for key in keys: - if metric == 'IR': + if metric == "IR": ret_metric.append(ret_stats[key].inforatio()[0]) - 
elif metric == 'Returns': + elif metric == "Returns": ret_metric.append(ret_stats[key].ann_returns()[0] * 100) - elif metric == 'Vol': + elif metric == "Vol": ret_metric.append(ret_stats[key].ann_vol()[0] * 100) - elif metric == 'Drawdowns': + elif metric == "Drawdowns": ret_metric.append(ret_stats[key].drawdowns()[0] * 100) if strip is not None: keys = [k.replace(strip, '') for k in keys] @@ -1686,18 +2031,34 @@ def _plot_ret_stats_helper(self, ret_stats, metric, title, file_tag, strip=None, style.scale_factor) + '.html' style.display_brand_label = False - return self._chart_return_with_df(ret_stats, style, silent_plot, chart_type='bar', ret_with_df=ret_with_df, + return self._chart_return_with_df(ret_stats, style, silent_plot, + chart_type='bar', + ret_with_df=ret_with_df, split_on_char=split_on_char) - def plot_strategy_group_benchmark_pnl_yoy(self, strip=None, silent_plot=False, ret_with_df=False, split_on_char=None): - - return self.plot_yoy_helper(self._strategy_group_benchmark_pnl_ret_stats, "", "Group Benchmark PnL YoY", - strip=strip, - silent_plot=silent_plot, ret_with_df=ret_with_df, - split_on_char=split_on_char) - - def plot_yoy_helper(self, ret_stats, title, file_tag, strip=None, silent_plot=False, ret_with_df=False, - split_on_char=None): + def plot_strategy_group_benchmark_pnl_yoy( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + + return self.plot_yoy_helper( + self._strategy_group_benchmark_pnl_ret_stats, "", + "Group Benchmark PnL YoY", + strip=strip, + silent_plot=silent_plot, ret_with_df=ret_with_df, + split_on_char=split_on_char) + + def plot_yoy_helper( + self, + ret_stats: dict, + title: str, + file_tag: str, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): style = self._create_style(title, title) # keys = self._strategy_group_benchmark_ret_stats.keys() @@ -1724,25 +2085,45 @@ def plot_yoy_helper(self, ret_stats, title, file_tag, strip=None, silent_plot=Fa ret_stats = ret_stats * 100 - return self._chart_return_with_df(ret_stats, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + return self._chart_return_with_df(ret_stats, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) - def plot_strategy_group_leverage(self, silent_plot=False, reduce_plot=True, resample='B', ret_with_df=False, - split_on_char=None): + def plot_strategy_group_leverage( + self, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): - style = self._create_style("Leverage", "Group Leverage", reduce_plot=reduce_plot) + style = self._create_style("Leverage", "Group Leverage", + reduce_plot=reduce_plot) - df = self._reduce_plot(self._strategy_group_leverage, reduce_plot=reduce_plot, resample=resample) + df = self._reduce_plot(self._strategy_group_leverage, + reduce_plot=reduce_plot, resample=resample) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) ###### Plot signals and trades, in terms of units, notionals and contract sizes (eg. 
for futures) - def plot_strategy_all_signals(self, signal_show=None, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None, multiplier=100): + def plot_strategy_all_signals( + self, + signal_show: str = None, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 100): - style = self._create_style("positions (% portfolio notional)", "Positions", reduce_plot=reduce_plot) + style = self._create_style("positions (% portfolio notional)", + "Positions", reduce_plot=reduce_plot) df = self._strategy_signal.copy() * multiplier @@ -1761,124 +2142,252 @@ def plot_strategy_all_signals(self, signal_show=None, strip=None, silent_plot=Fa df = df.drop(not_found, axis=1) try: - df = self._strip_dataframe(self._reduce_plot(df, reduce_plot=reduce_plot, - resample=resample), strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df) + df = self._strip_dataframe( + self._reduce_plot(df, reduce_plot=reduce_plot, + resample=resample), strip) + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df) except: pass - def plot_strategy_signals(self, date=None, strip=None, silent_plot=False, strip_times=False, ret_with_df=False, - split_on_char=None, multiplier=100): - return self._plot_signal(self._strategy_signal, label="positions (% portfolio notional)", caption="Positions", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, - ret_with_df=ret_with_df, - split_on_char=split_on_char, multiplier=multiplier) - - def plot_strategy_trades(self, date=None, strip=None, silent_plot=False, strip_times=False, ret_with_df=False, - split_on_char=None, multiplier=100): - return self._plot_signal(self._strategy_trade, label="trades (% portfolio notional)", caption="Trades", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, - ret_with_df=ret_with_df, - split_on_char=split_on_char, multiplier=multiplier) - - def plot_strategy_signals_notional(self, date=None, strip=None, silent_plot=False, strip_times=False, - ret_with_df=False, - split_on_char=None, multiplier=1): - return self._plot_signal(self._strategy_signal_notional, label="positions (scaled by notional)", + def plot_strategy_signals( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 100): + return self._plot_signal(self._strategy_signal, + label="positions (% portfolio notional)", caption="Positions", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, + date=date, strip=strip, + silent_plot=silent_plot, + strip_times=strip_times, ret_with_df=ret_with_df, - split_on_char=split_on_char, multiplier=multiplier) - - def plot_strategy_trades_notional(self, date=None, strip=None, silent_plot=False, strip_times=False, - split_on_char=None, multiplier=1): - return self._plot_signal(self._strategy_trade_notional, label="trades (scaled by notional)", caption="Trades", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, - split_on_char=split_on_char, multiplier=multiplier) - - def plot_strategy_trades_notional_sizes(self, strip=None, silent_plot=False, ret_with_df=False, split_on_char=None): - - if strip is not None: self._strategy_trade_notional_sizes.index = [k.replace(strip, '') - 
for k in - self._strategy_trade_notional_sizes.index] + split_on_char=split_on_char, + multiplier=multiplier) + + def plot_strategy_trades( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 100): + return self._plot_signal(self._strategy_trade, + label="trades (% portfolio notional)", + caption="Trades", + date=date, strip=strip, + silent_plot=silent_plot, + strip_times=strip_times, + ret_with_df=ret_with_df, + split_on_char=split_on_char, + multiplier=multiplier) + + def plot_strategy_signals_notional( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 1): + + return self._plot_signal( + self._strategy_signal_notional, + label="positions (scaled by notional)", + caption="Positions", + date=date, strip=strip, silent_plot=silent_plot, + strip_times=strip_times, + ret_with_df=ret_with_df, + split_on_char=split_on_char, + multiplier=multiplier) + + def plot_strategy_trades_notional( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + split_on_char: str = None, + multiplier: int = 1): + + return self._plot_signal( + self._strategy_trade_notional, label="trades (scaled by notional)", + caption="Trades", + date=date, strip=strip, silent_plot=silent_plot, + strip_times=strip_times, + split_on_char=split_on_char, multiplier=multiplier) + + def plot_strategy_trades_notional_sizes( + self, + strip: str = None, + silent_plot: bool = False, + ret_with_df: bool = False, + split_on_char: str = None): + + if strip is not None: self._strategy_trade_notional_sizes.index = [ + k.replace(strip, '') + for k in + self._strategy_trade_notional_sizes.index] style = self._create_style("", "", reduce_plot=False) try: - style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade notional size).png' - style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (Strategy trade notional size).html' + style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy trade notional size).png" + style.html_file_output = self.DUMP_PATH + self.FINAL_STRATEGY + " (Strategy trade notional size).html" - df = self._strip_dataframe(self._strategy_trade_notional_sizes, strip) + df = self._strip_dataframe(self._strategy_trade_notional_sizes, + strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='bar', ret_with_df=ret_with_df, + return self._chart_return_with_df(df, style, silent_plot, + chart_type='bar', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_signals_contracts(self, date=None, strip=None, silent_plot=False, strip_times=False, - ret_with_df=False, split_on_char=None, multiplier=1): - return self._plot_signal(self._strategy_signal_contracts, label="positions (contracts)", caption="Positions", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, - ret_with_df=ret_with_df, split_on_char=split_on_charm, multiplier=multiplier) - - def plot_strategy_trades_contracts(self, date=None, strip=None, silent_plot=False, strip_times=False, - ret_with_df=False, split_on_char=None, multiplier=1): - return self._plot_signal(self._strategy_trade_contracts, label="trades (contracts)", caption="Contracts", - date=date, strip=strip, silent_plot=silent_plot, strip_times=strip_times, - 
ret_with_df=ret_with_df, split_on_char=split_on_charm, multiplier=multiplier) + def plot_strategy_signals_contracts( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 1): + return self._plot_signal(self._strategy_signal_contracts, + label="positions (contracts)", + caption="Positions", + date=date, strip=strip, + silent_plot=silent_plot, + strip_times=strip_times, + ret_with_df=ret_with_df, + split_on_char=split_on_char, + multiplier=multiplier) + + def plot_strategy_trades_contracts( + self, + date: str = None, + strip: str = None, + silent_plot: bool = False, + strip_times: bool = False, + ret_with_df: bool = False, + split_on_char: str = None, + multiplier: int = 1): + return self._plot_signal(self._strategy_trade_contracts, + label="trades (contracts)", + caption="Contracts", + date=date, strip=strip, + silent_plot=silent_plot, + strip_times=strip_times, + ret_with_df=ret_with_df, + split_on_char=split_on_char, + multiplier=multiplier) ###### plot aggregated portfolio exposures - def plot_strategy_total_exposures(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): + def plot_strategy_total_exposures( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): style = self._create_style("", "Strategy Total Exposures") - df = pd.concat([self._strategy_total_longs, self._strategy_total_shorts, self._strategy_total_exposure], - axis=1) + df = pd.concat( + [self._strategy_total_longs, self._strategy_total_shorts, + self._strategy_total_exposure], + axis=1) try: - df = self._strip_dataframe(self._reduce_plot(df, reduce_plot=reduce_plot, resample=resample), strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(df, reduce_plot=reduce_plot, + resample=resample), strip) + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_net_exposures(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): - style = self._create_style("", "Strategy Net Exposures", reduce_plot=reduce_plot) + def plot_strategy_net_exposures( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): + style = self._create_style("", "Strategy Net Exposures", + reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._strategy_net_exposure, - reduce_plot=reduce_plot, resample=resample), strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(self._strategy_net_exposure, + reduce_plot=reduce_plot, resample=resample), + strip) + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_total_exposures_notional(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): - - style = self._create_style("(mm)", "Strategy Total Exposures (mm)", reduce_plot=reduce_plot) + def 
plot_strategy_total_exposures_notional( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): + + style = self._create_style("(mm)", "Strategy Total Exposures (mm)", + reduce_plot=reduce_plot) df = pd.concat([self._strategy_total_longs_notional, - self._strategy_total_shorts_notional, self._strategy_total_exposure_notional], axis=1) + self._strategy_total_shorts_notional, + self._strategy_total_exposure_notional], axis=1) try: - df = self._strip_dataframe(self._reduce_plot(df / 1000000.0, reduce_plot=reduce_plot, resample=resample), - strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe( + self._reduce_plot(df / 1000000.0, reduce_plot=reduce_plot, + resample=resample), + strip) + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass - def plot_strategy_net_exposures_notional(self, strip=None, silent_plot=False, reduce_plot=True, resample='B', - ret_with_df=False, split_on_char=None): + def plot_strategy_net_exposures_notional( + self, + strip: str = None, + silent_plot: bool = False, + reduce_plot: bool = True, + resample: str = "B", + ret_with_df: bool = False, + split_on_char: str = None): - style = self._create_style("(mm)", "Strategy Net Exposures (mm)", reduce_plot=reduce_plot) + style = self._create_style("(mm)", "Strategy Net Exposures (mm)", + reduce_plot=reduce_plot) try: - df = self._strip_dataframe(self._reduce_plot(self._strategy_net_exposure_notional / 1000000.0, - reduce_plot=reduce_plot, resample=resample), strip) - return self._chart_return_with_df(df, style, silent_plot, chart_type='line', ret_with_df=ret_with_df, + df = self._strip_dataframe(self._reduce_plot( + self._strategy_net_exposure_notional / 1000000.0, + reduce_plot=reduce_plot, resample=resample), strip) + return self._chart_return_with_df(df, style, silent_plot, + chart_type='line', + ret_with_df=ret_with_df, split_on_char=split_on_char) except: pass @@ -1897,7 +2406,8 @@ def _grab_signals(self, strategy_signal, date=None, strip=None): # In case our history is not long enough for d in date: try: - last_day.append(strategy_signal.iloc[d].transpose().to_frame()) + last_day.append( + strategy_signal.iloc[d].transpose().to_frame()) except: pass @@ -1909,12 +2419,13 @@ def _grab_signals(self, strategy_signal, date=None, strip=None): return last_day - def _plot_signal(self, sig, label=' ', caption='', date=None, strip=None, silent_plot=False, strip_times=False, + def _plot_signal(self, sig, label=' ', caption='', date=None, strip=None, + silent_plot=False, strip_times=False, ret_with_df=False, split_on_char=None, multiplier=100): ######## Plot signals - strategy_signal = multiplier * (sig) + strategy_signal = multiplier * (sig) last_day = self._grab_signals(strategy_signal, date=date, strip=strip) style = self._create_style(label, caption) @@ -1933,21 +2444,25 @@ def _plot_signal(self, sig, label=' ', caption='', date=None, strip=None, silent except: pass - return self._chart_return_with_df(last_day, style, silent_plot, chart_type='bar', ret_with_df=ret_with_df, + return self._chart_return_with_df(last_day, style, silent_plot, + chart_type='bar', + ret_with_df=ret_with_df, split_on_char=split_on_char) def _strip_dataframe(self, data_frame, strip): if strip is None: return data_frame - if not(isinstance(strip, list)): + if not 
(isinstance(strip, list)): strip = [strip] for s in strip: if s == '.': - data_frame.columns = [x.split(s)[0] if s in x else x for x in data_frame.columns] + data_frame.columns = [x.split(s)[0] if s in x else x for x in + data_frame.columns] else: - data_frame.columns = [x.replace(s, '') if s in x else x for x in data_frame.columns] + data_frame.columns = [x.replace(s, '') if s in x else x for x + in data_frame.columns] return data_frame @@ -1956,21 +2471,22 @@ def _create_style(self, title, file_add, reduce_plot=True): if self.SHOW_TITLES: style.title = self.FINAL_STRATEGY + " " + title - + style.display_legend = True style.scale_factor = self.SCALE_FACTOR style.width = self.WIDTH style.height = self.HEIGHT style.source = self.CHART_SOURCE - style.silent_display = not(self.SHOW_CHARTS) + style.silent_display = not (self.SHOW_CHARTS) style.legend_bgcolor = 'rgba(0,0,0,0)' # When plotting many points use WebGl version of plotly if specified - if not(reduce_plot): + if not (reduce_plot): style.plotly_webgl = True - if self.DEFAULT_PLOT_ENGINE not in ['plotly', 'cufflinks'] and self.SAVE_FIGURES: + if self.DEFAULT_PLOT_ENGINE not in ['plotly', + 'cufflinks'] and self.SAVE_FIGURES: style.file_output = self.DUMP_PATH + self.FINAL_STRATEGY + ' (' + file_add + ') ' \ + str(style.scale_factor) + '.png' @@ -1997,7 +2513,14 @@ def __init__(self, br=None): self._risk_engine = RiskEngine() self._calculations = Calculations() - def optimize_portfolio_weights(self, returns_df, signal_df, signal_pnl_cols, br=None): + def optimize_portfolio_weights( + self, + returns_df: pd.DataFrame, + signal_df: pd.DataFrame, + signal_pnl_cols: List[str], + br: BacktestRequest = None) -> ( + pd.DataFrame, pd.DataFrame, pd.DataFrame, + pd.DataFrame, pd.DataFrame, pd.DataFrame): if br is None: br = self._br @@ -2009,20 +2532,24 @@ def optimize_portfolio_weights(self, returns_df, signal_df, signal_pnl_cols, br= # Do we have a vol target for individual signals? if br.signal_vol_adjust is True: - leverage_df = self._risk_engine.calculate_leverage_factor(returns_df, br.signal_vol_target, - br.signal_vol_max_leverage, - br.signal_vol_periods, br.signal_vol_obs_in_year, - br.signal_vol_rebalance_freq, - br.signal_vol_resample_freq, - br.signal_vol_resample_type, - period_shift=br.signal_vol_period_shift) + leverage_df = self._risk_engine.calculate_leverage_factor( + returns_df, + br.signal_vol_target, + br.signal_vol_max_leverage, + br.signal_vol_periods, br.signal_vol_obs_in_year, + br.signal_vol_rebalance_freq, + br.signal_vol_resample_freq, + br.signal_vol_resample_type, + period_shift=br.signal_vol_period_shift) signal_df = pd.DataFrame( - signal_df.values * leverage_df.values, index=signal_df.index, columns=signal_df.columns) + signal_df.values * leverage_df.values, + index=signal_df.index, columns=signal_df.columns) individual_leverage_df = leverage_df # Contains leverage of individual signal (before portfolio vol target) - signal_pnl = self._calculations.calculate_signal_returns_with_tc_matrix(signal_df, returns_df, tc=tc, rc=rc) + signal_pnl = self._calculations.calculate_signal_returns_with_tc_matrix( + signal_df, returns_df, tc=tc, rc=rc) signal_pnl.columns = signal_pnl_cols adjusted_weights_matrix = None @@ -2030,51 +2557,67 @@ def optimize_portfolio_weights(self, returns_df, signal_df, signal_pnl_cols, br= # Portfolio is average of the underlying signals: should we sum them or average them or use another # weighting scheme? 
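# Illustrative sketch (not part of the patch): how the portfolio combination
# modes handled below collapse the per-signal P&L into a single portfolio
# series. "sum" adds the signal returns, "mean" equal-weights them, and
# "weighted" uses user-supplied weights that are renormalised each day so
# signals with no P&L history receive zero weight. The frame, column names
# and weights here are hypothetical, purely for illustration.
import numpy as np
import pandas as pd

toy_signal_pnl = pd.DataFrame(
    {"EURUSD-momentum": [0.010, -0.020, 0.005],
     "GBPUSD-momentum": [np.nan, 0.010, -0.005]},
    index=pd.date_range("2020-01-01", periods=3))

portfolio_sum = toy_signal_pnl.sum(axis=1)    # "sum"
portfolio_mean = toy_signal_pnl.mean(axis=1)  # "mean"

# "weighted": repeat the weight vector down the rows, zero out the weights
# where the P&L is missing, then renormalise each row to sum to one
weights = np.array([1.0, 0.5])
weights_matrix = np.repeat(weights[np.newaxis, :],
                           len(toy_signal_pnl.index), 0)
weights_matrix[np.isnan(toy_signal_pnl.values)] = 0.0

row_totals = weights_matrix.sum(axis=1)
row_totals[row_totals == 0.0] = 1.0  # avoid divide by zero
weights_matrix = weights_matrix / row_totals[:, np.newaxis]

portfolio_weighted = pd.DataFrame(
    np.nansum(toy_signal_pnl.values * weights_matrix, axis=1),
    index=toy_signal_pnl.index, columns=["Portfolio"])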
if br.portfolio_combination is not None: - if br.portfolio_combination == 'sum' and br.portfolio_combination_weights is None: - portfolio = pd.DataFrame(data=signal_pnl.sum(axis=1), index=signal_pnl.index, columns=['Portfolio']) - elif br.portfolio_combination == 'mean' and br.portfolio_combination_weights is None: - portfolio = pd.DataFrame(data=signal_pnl.mean(axis=1), index=signal_pnl.index, columns=['Portfolio']) - - adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio(br, signal_pnl, method='mean') - elif 'weighted' in br.portfolio_combination and isinstance(br.portfolio_combination_weights, dict): + if br.portfolio_combination == "sum" and br.portfolio_combination_weights is None: + portfolio = pd.DataFrame(data=signal_pnl.sum(axis=1), + index=signal_pnl.index, + columns=["Portfolio"]) + elif br.portfolio_combination == "mean" and br.portfolio_combination_weights is None: + portfolio = pd.DataFrame(data=signal_pnl.mean(axis=1), + index=signal_pnl.index, + columns=["Portfolio"]) + + adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio( + br, signal_pnl, method="mean") + elif "weighted" in br.portfolio_combination and isinstance( + br.portfolio_combination_weights, dict): # Get the weights for each asset - adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio(br, signal_pnl, method=br.portfolio_combination) + adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio( + br, signal_pnl, method=br.portfolio_combination) - portfolio = pd.DataFrame(data=(signal_pnl.values * adjusted_weights_matrix), index=signal_pnl.index) + portfolio = pd.DataFrame( + data=(signal_pnl.values * adjusted_weights_matrix), + index=signal_pnl.index) is_all_na = pd.isnull(portfolio).all(axis=1) - portfolio = pd.DataFrame(portfolio.sum(axis=1), columns=['Portfolio']) + portfolio = pd.DataFrame(portfolio.sum(axis=1), + columns=["Portfolio"]) # Overwrite days when every asset PnL was null is NaN with nan portfolio[is_all_na] = np.nan else: # Just assume to take the mean / ie. equal weights for each signal - portfolio = pd.DataFrame(data=signal_pnl.mean(axis=1), index=signal_pnl.index, columns=['Portfolio']) + portfolio = pd.DataFrame(data=signal_pnl.mean(axis=1), + index=signal_pnl.index, + columns=["Portfolio"]) - adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio(br, signal_pnl, method='mean') + adjusted_weights_matrix = self.calculate_signal_weights_for_portfolio( + br, signal_pnl, method="mean") - portfolio_leverage_df = pd.DataFrame(data=np.ones(len(signal_pnl.index)), index=signal_pnl.index, - columns=['Portfolio']) + portfolio_leverage_df = pd.DataFrame( + data=np.ones(len(signal_pnl.index)), index=signal_pnl.index, + columns=["Portfolio"]) # Should we apply vol target on a portfolio level basis? 
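# Illustrative sketch (not part of the patch): the core of the portfolio-level
# vol targeting applied below. Realised vol is measured on a rolling window
# and annualised; the leverage is the ratio of the vol target to realised vol,
# capped at a maximum. The numbers used here (10% target, 5x cap, 60-day
# window, 252 observations per year) are hypothetical defaults; the risk
# engine additionally resamples the leverage series to a rebalance frequency
# before applying it.
import numpy as np
import pandas as pd


def toy_vol_target_leverage(returns: pd.Series,
                            vol_target: float = 0.10,
                            vol_max_leverage: float = 5.0,
                            vol_periods: int = 60,
                            obs_in_year: int = 252) -> pd.Series:
    # annualised rolling realised volatility
    realised_vol = returns.rolling(vol_periods).std() * np.sqrt(obs_in_year)

    # leverage needed to hit the vol target, capped at the maximum leverage
    lev = (vol_target / realised_vol).clip(upper=vol_max_leverage)

    # ignore the first observations before the vol window has filled
    lev.iloc[:vol_periods] = np.nan

    return lev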
if br.portfolio_vol_adjust is True: # Calculate portfolio leverage - portfolio_leverage_df = self._risk_engine.calculate_leverage_factor(portfolio, - br.portfolio_vol_target, - br.portfolio_vol_max_leverage, - br.portfolio_vol_periods, - br.portfolio_vol_obs_in_year, - br.portfolio_vol_rebalance_freq, - br.portfolio_vol_resample_freq, - br.portfolio_vol_resample_type, - period_shift=br.portfolio_vol_period_shift) + portfolio_leverage_df = self._risk_engine.calculate_leverage_factor( + portfolio, + br.portfolio_vol_target, + br.portfolio_vol_max_leverage, + br.portfolio_vol_periods, + br.portfolio_vol_obs_in_year, + br.portfolio_vol_rebalance_freq, + br.portfolio_vol_resample_freq, + br.portfolio_vol_resample_type, + period_shift=br.portfolio_vol_period_shift) # portfolio, portfolio_leverage_df = risk_engine.calculate_vol_adjusted_returns(portfolio, br = br) # Multiply portfolio leverage * individual signals to get final position signals length_cols = len(signal_df.columns) leverage_matrix = np.transpose( - np.repeat(portfolio_leverage_df.values.flatten()[np.newaxis, :], length_cols, 0)) + np.repeat(portfolio_leverage_df.values.flatten()[np.newaxis, :], + length_cols, 0)) # Final portfolio signals (including signal & portfolio leverage) portfolio_signal = pd.DataFrame( @@ -2088,34 +2631,42 @@ def optimize_portfolio_weights(self, returns_df, signal_df, signal_pnl_cols, br= if 'sum' in br.portfolio_combination: pass elif br.portfolio_combination == 'mean' \ - or (br.portfolio_combination == 'weighted' and isinstance(br.portfolio_combination_weights, dict)): - portfolio_signal = pd.DataFrame(data=(portfolio_signal.values * adjusted_weights_matrix), - index=portfolio_signal.index, - columns=portfolio_signal.columns) + or (br.portfolio_combination == 'weighted' and isinstance( + br.portfolio_combination_weights, dict)): + portfolio_signal = pd.DataFrame( + data=(portfolio_signal.values * adjusted_weights_matrix), + index=portfolio_signal.index, + columns=portfolio_signal.columns) else: # Otherwise it's "mean" - portfolio_signal = pd.DataFrame(data=(portfolio_signal.values * adjusted_weights_matrix), - index=portfolio_signal.index, - columns=portfolio_signal.columns) + portfolio_signal = pd.DataFrame( + data=(portfolio_signal.values * adjusted_weights_matrix), + index=portfolio_signal.index, + columns=portfolio_signal.columns) - return portfolio_signal_before_weighting, portfolio_signal, portfolio_leverage_df, portfolio, individual_leverage_df, signal_pnl - def calculate_signal_weights_for_portfolio(self, br, signal_pnl, method='mean'): + def calculate_signal_weights_for_portfolio( + self, + br: Backtest, + signal_pnl: pd.DataFrame, + method: str = "mean") -> pd.DataFrame: """Calculates the weights of each signal for the portfolio Parameters ---------- br : BacktestRequest - Parameters for the backtest specifying start date, finish data, transaction costs etc. + Parameters for the backtest specifying start date, finish data, + transaction costs etc. signal_pnl : pd.DataFrame Contains the daily P&L for the portfolio method : String - 'mean' - assumes equal weighting for each signal - 'weighted' - can use predefined user weights (eg. if we assign weighting of 1, 1, 0.5, for three signals - the third signal will have a weighting of half versus the others) + "mean" - assumes equal weighting for each signal + "weighted" - can use predefined user weights (eg. 
if we assign + weighting of 1, 1, 0.5, for three signals the third signal will + have a weighting of half versus the others) weights : dict Portfolio weights @@ -2126,25 +2677,30 @@ def calculate_signal_weights_for_portfolio(self, br, signal_pnl, method='mean'): Contains the portfolio weights """ - if method == 'mean': + if method == "mean": weights_vector = np.ones(len(signal_pnl.columns)) - elif method == 'weighted' or 'weighted-sum': + elif method == "weighted" or "weighted-sum": # Get the weights for each asset - weights_vector = np.array([float(br.portfolio_combination_weights[col]) for col in signal_pnl.columns]) + weights_vector = np.array( + [float(br.portfolio_combination_weights[col]) for col in + signal_pnl.columns]) # Repeat this down for every day - weights_matrix = np.repeat(weights_vector[np.newaxis, :], len(signal_pnl.index), 0) + weights_matrix = np.repeat(weights_vector[np.newaxis, :], + len(signal_pnl.index), 0) # Where we don't have old price data, make the weights 0 there ind = np.isnan(signal_pnl.values) weights_matrix[ind] = 0 - if method != 'weighted-sum': + if method != "weighted-sum": # The total weights will vary, as historically might not have all the assets trading total_weights = np.sum(weights_matrix, axis=1) # Replicate across columns - total_weights = np.transpose(np.repeat(total_weights[np.newaxis, :], len(signal_pnl.columns), 0)) + total_weights = np.transpose( + np.repeat(total_weights[np.newaxis, :], + len(signal_pnl.columns), 0)) # To avoid divide by zero total_weights[total_weights == 0.0] = 1.0 @@ -2156,20 +2712,26 @@ def calculate_signal_weights_for_portfolio(self, br, signal_pnl, method='mean'): return weights_matrix + ####################################################################################################################### class RiskEngine(object): - """Adjusts signal weighting according to risk constraints (volatility targeting and position limit constraints) + """Adjusts signal weighting according to risk constraints (volatility + targeting and position limit constraints) """ - def calculate_vol_adjusted_index_from_prices(self, prices_df, br): + def calculate_vol_adjusted_index_from_prices( + self, + prices_df: pd.DataFrame, + br: BacktestRequest) -> pd.DataFrame: """Adjusts an index of prices for a vol target Parameters ---------- br : BacktestRequest - Parameters for the backtest specifying start date, finish data, transaction costs etc. + Parameters for the backtest specifying start date, finish data, + transaction costs etc. asset_a_df : pd.DataFrame Asset prices to be traded @@ -2180,20 +2742,26 @@ def calculate_vol_adjusted_index_from_prices(self, prices_df, br): calculations = Calculations() - returns_df, leverage_df = self.calculate_vol_adjusted_returns(prices_df, br, returns=False) + returns_df, leverage_df = self.calculate_vol_adjusted_returns( + prices_df, br, returns=False) if br.cum_index == 'mult': return calculations.create_mult_index(returns_df) elif br.cum_index == 'add': return calculations.create_add_index(returns_df) - def calculate_vol_adjusted_returns(self, returns_df, br, returns=True): + def calculate_vol_adjusted_returns( + self, + returns_df: pd.DataFrame, + br: BacktestRequest, + returns: bool = True) -> (pd.DataFrame, pd.DataFrame): """Adjusts returns for a vol target Parameters ---------- br : BacktestRequest - Parameters for the backtest specifying start date, finish data, transaction costs etc. + Parameters for the backtest specifying start date, finish data, + transaction costs etc. 
returns_a_df : pd.DataFrame Asset returns to be traded @@ -2207,20 +2775,33 @@ def calculate_vol_adjusted_returns(self, returns_df, br, returns=True): if not returns: returns_df = calculations.calculate_returns(returns_df) leverage_df = self.calculate_leverage_factor(returns_df, - br.portfolio_vol_target, br.portfolio_vol_max_leverage, - br.portfolio_vol_periods, br.portfolio_vol_obs_in_year, - br.portfolio_vol_rebalance_freq, br.portfolio_vol_resample_freq, + br.portfolio_vol_target, + br.portfolio_vol_max_leverage, + br.portfolio_vol_periods, + br.portfolio_vol_obs_in_year, + br.portfolio_vol_rebalance_freq, + br.portfolio_vol_resample_freq, br.portfolio_vol_resample_type, period_shift=br.portfolio_vol_period_shift) - vol_returns_df = calculations.calculate_signal_returns_with_tc_matrix(leverage_df, returns_df, tc=br.spot_tc_bp) + vol_returns_df = calculations.calculate_signal_returns_with_tc_matrix( + leverage_df, returns_df, tc=br.spot_tc_bp) vol_returns_df.columns = returns_df.columns return vol_returns_df, leverage_df - def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vol_periods=60, vol_obs_in_year=252, - vol_rebalance_freq='BM', resample_freq=None, resample_type='mean', - returns=True, period_shift=0): + def calculate_leverage_factor( + self, + returns_df: pd.DataFrame, + vol_target: float, + vol_max_leverage: float, + vol_periods: int = 60, + vol_obs_in_year: int = 252, + vol_rebalance_freq: str = "BM", + resample_freq: str = None, + resample_type: str = "mean", + returns: bool = True, + period_shift: int = 0) -> pd.DataFrame: """Calculates the time series of leverage for a specified vol target Parameters @@ -2259,7 +2840,8 @@ def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vo if not returns: returns_df = calculations.calculate_returns(returns_df) roll_vol_df = calculations.rolling_volatility(returns_df, - periods=vol_periods, obs_in_year=vol_obs_in_year).shift( + periods=vol_periods, + obs_in_year=vol_obs_in_year).shift( period_shift) # calculate the leverage as function of vol target (with max lev constraint) @@ -2269,9 +2851,12 @@ def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vo lev_df[lev_df > vol_max_leverage] = vol_max_leverage if resample_type is not None: - lev_df = filter.resample_time_series_frequency(lev_df, vol_rebalance_freq, resample_type) + lev_df = filter.resample_time_series_frequency(lev_df, + vol_rebalance_freq, + resample_type) - returns_df, lev_df = calculations.join_left_fill_right(returns_df, lev_df) + returns_df, lev_df = calculations.join_left_fill_right(returns_df, + lev_df) # # in case leverage changes on a weekend do outer join, and fill down # # the leverage @@ -2281,11 +2866,16 @@ def calculate_leverage_factor(self, returns_df, vol_target, vol_max_leverage, vo # # now realign back to days when we trade # returns_df, lev_df = returns_df.align(lev_df, join='left', axis=0) - lev_df[0:vol_periods] = np.nan # ignore the first elements before the vol window kicks in + lev_df[ + 0:vol_periods] = np.nan # ignore the first elements before the vol window kicks in return lev_df - def calculate_position_clip_adjustment(self, portfolio_net_exposure, portfolio_total_exposure, br): + def calculate_position_clip_adjustment( + self, + portfolio_net_exposure: pd.DataFrame, + portfolio_total_exposure: pd.DataFrame, + br) -> pd.DataFrame: """Calculate the leverage adjustment that needs to be made in the portfolio such that either the net exposure or the absolute exposure fits 
within predefined limits @@ -2307,42 +2897,54 @@ def calculate_position_clip_adjustment(self, portfolio_net_exposure, portfolio_t # Adjust leverage of portfolio based on max NET position sizes if br.max_net_exposure is not None: - portfolio_net_exposure = portfolio_net_exposure.shift(br.position_clip_period_shift) + portfolio_net_exposure = portfolio_net_exposure.shift( + br.position_clip_period_shift) # add further constraints on portfolio (total net amount of longs and short) - position_clip_adjustment = pd.DataFrame(data=np.ones(len(portfolio_net_exposure.index)), - index=portfolio_net_exposure.index, - columns=['Portfolio']) + position_clip_adjustment = pd.DataFrame( + data=np.ones(len(portfolio_net_exposure.index)), + index=portfolio_net_exposure.index, + columns=['Portfolio']) portfolio_abs_exposure = portfolio_net_exposure.abs() - # For those periods when the absolute net positioning is greater than our limit cut down the leverage - position_clip_adjustment[(portfolio_abs_exposure > br.max_net_exposure).values] = \ + # For those periods when the absolute net positioning is greater + # than our limit cut down the leverage + position_clip_adjustment[ + (portfolio_abs_exposure > br.max_net_exposure).values] = \ br.max_net_exposure / portfolio_abs_exposure # Adjust leverage of portfolio based on max TOTAL position sizes if br.max_abs_exposure is not None: - portfolio_abs_exposure = portfolio_abs_exposure.shift(br.position_clip_period_shift) - - # add further constraints on portfolio (total net amount of longs and short) - position_clip_adjustment = pd.DataFrame(data=np.ones(len(portfolio_abs_exposure.index)), - index=portfolio_abs_exposure.index, - columns=['Portfolio']) - - # For those periods when the absolute TOTAL positioning is greater than our limit cut down the leverage - position_clip_adjustment[(portfolio_total_exposure > br.max_abs_exposure).values] = \ + portfolio_abs_exposure = portfolio_abs_exposure.shift( + br.position_clip_period_shift) + + # add further constraints on portfolio (total net amount of + # longs and short) + position_clip_adjustment = pd.DataFrame( + data=np.ones(len(portfolio_abs_exposure.index)), + index=portfolio_abs_exposure.index, + columns=['Portfolio']) + + # For those periods when the absolute TOTAL positioning is + # greater than our limit cut down the leverage + position_clip_adjustment[ + (portfolio_total_exposure > br.max_abs_exposure).values] = \ br.max_abs_exposure / portfolio_total_exposure - # Only allow the position clip adjustment to change on certain days (eg. 'BM' = month end) + # Only allow the position clip adjustment to change on certain + # days (eg. 
'BM' = month end) if br.position_clip_rebalance_freq is not None: calculations = Calculations() filter = Filter() - position_clip_adjustment = filter.resample_time_series_frequency(position_clip_adjustment, - br.position_clip_rebalance_freq, - br.position_clip_resample_type) + position_clip_adjustment = filter.resample_time_series_frequency( + position_clip_adjustment, + br.position_clip_rebalance_freq, + br.position_clip_resample_type) - a, position_clip_adjustment = calculations.join_left_fill_right(portfolio_net_exposure, - position_clip_adjustment) + a, position_clip_adjustment = calculations.join_left_fill_right( + portfolio_net_exposure, + position_clip_adjustment) return position_clip_adjustment diff --git a/finmarketpy/curve/volatility/fxoptionspricer.py b/finmarketpy/curve/volatility/fxoptionspricer.py index 9b97e55..0fa5ab0 100644 --- a/finmarketpy/curve/volatility/fxoptionspricer.py +++ b/finmarketpy/curve/volatility/fxoptionspricer.py @@ -23,19 +23,22 @@ from finmarketpy.curve.abstractpricer import AbstractPricer from finmarketpy.curve.rates.fxforwardspricer import FXForwardsPricer -from financepy.finutils.FinDate import FinDate -from financepy.models.FinModelBlackScholes import FinModelBlackScholes -from financepy.products.fx.FinFXVanillaOption import FinFXVanillaOption -from financepy.finutils.FinGlobalTypes import FinOptionTypes -from financepy.products.fx.FinFXMktConventions import * +from financepy.utils.date import Date +from financepy.models.black_scholes import BlackScholes +from financepy.products.fx.fx_vanilla_option import FXVanillaOption +from financepy.utils.global_types import OptionTypes + +# from financepy.products.fx.FinFXMktConventions import * market_constants = MarketConstants() + class FXOptionsPricer(AbstractPricer): """Prices various vanilla FX options, using FinancePy underneath. """ - def __init__(self, fx_vol_surface=None, premium_output=market_constants.fx_options_premium_output, + def __init__(self, fx_vol_surface=None, + premium_output=market_constants.fx_options_premium_output, delta_output=market_constants.fx_options_delta_output): self._calendar = Calendar() @@ -44,9 +47,19 @@ def __init__(self, fx_vol_surface=None, premium_output=market_constants.fx_optio self._premium_output = premium_output self._delta_output = delta_output - def price_instrument(self, cross, horizon_date, strike, expiry_date=None, vol=None, notional=1000000, - contract_type='european-call', tenor=None, - fx_vol_surface=None, premium_output=None, delta_output=None, depo_tenor=None, use_atm_quoted=False, + def price_instrument(self, + cross, + horizon_date, + strike, + expiry_date=None, + vol=None, notional=1000000, + contract_type='european-call', + tenor=None, + fx_vol_surface=None, + premium_output=None, + delta_output=None, + depo_tenor=None, + use_atm_quoted=False, return_as_df=True): """Prices FX options for horizon dates/expiry dates given by the user from FX spot rates, FX volatility surface and deposit rates. @@ -62,11 +75,11 @@ def price_instrument(self, cross, horizon_date, strike, expiry_date=None, vol=No strike : np.ndarray, float or str Strike of option - eg. 'atm' - at-the-money - eg. 'atmf' - at-the-money forward - eg. 'atms' - at-the-money spot - eg. '25d-otm' - out-of-the-money 25d - eg. '10d-otm + eg. "atm" - at-the-money + eg. "atmf" - at-the-money forward + eg. "atms" - at-the-money spot + eg. "25d-otm" - out-of-the-money 25d + eg. 
"10d-otm" - out-of-the-money 10d expiry_date : DateTimeIndex (optional) Expiry dates for options @@ -80,7 +93,7 @@ def price_instrument(self, cross, horizon_date, strike, expiry_date=None, vol=No contract_type : str What type of option are we pricing? - eg. 'european-call' + eg. "european-call" tenor : str (optional) Tenor of option @@ -131,10 +144,11 @@ def price_instrument(self, cross, horizon_date, strike, expiry_date=None, vol=No else: expiry_date = pd.DatetimeIndex(expiry_date) else: - expiry_date = self._calendar.get_expiry_date_from_horizon_date(horizon_date, tenor, cal=cross) + expiry_date = self._calendar.get_expiry_date_from_horizon_date( + horizon_date, tenor, cal=cross) # If the strike hasn't been supplied need to work this out - if not(isinstance(strike, np.ndarray)): + if not (isinstance(strike, np.ndarray)): old_strike = strike if isinstance(strike, str): @@ -145,7 +159,7 @@ def price_instrument(self, cross, horizon_date, strike, expiry_date=None, vol=No strike.fill(old_strike) # If the vol hasn't been supplied need to work this out - if not(isinstance(vol, np.ndarray)): + if not (isinstance(vol, np.ndarray)): if vol is None: vol = np.nan @@ -166,56 +180,73 @@ def _price_option(contract_type_, contract_type_fin_): # If we have a "key strike" need to fit the vol surface if isinstance(strike[i], str): - if not(built_vol_surface): - + if not (built_vol_surface): fx_vol_surface.build_vol_surface(horizon_date[i]) - fx_vol_surface.extract_vol_surface(num_strike_intervals=None) + fx_vol_surface.extract_vol_surface( + num_strike_intervals=None) built_vol_surface = True # Delta neutral strike/or whatever strike is quoted as ATM # usually this is ATM delta neutral strike, but can sometimes be ATMF for some Latam # Take the vol directly quoted, rather than getting it from building vol surface - if strike[i] == 'atm': + if strike[i] == "atm": strike[i] = fx_vol_surface.get_atm_strike(tenor) if use_atm_quoted: - vol[i] = fx_vol_surface.get_atm_quoted_vol(tenor) / 100.0 + vol[i] = fx_vol_surface.get_atm_quoted_vol( + tenor) / 100.0 else: - vol[i] = fx_vol_surface.get_atm_vol(tenor) / 100.0 # interpolated - elif strike[i] == 'atms': - strike[i] = fx_vol_surface.get_spot() # Interpolate vol later - elif strike[i] == 'atmf': + vol[i] = fx_vol_surface.get_atm_vol( + tenor) / 100.0 # interpolated + elif strike[i] == "atms": + strike[ + i] = fx_vol_surface.get_spot() # Interpolate vol later + elif strike[i] == "atmf": # Quoted tenor, no need to interpolate - strike[i] = float(fx_vol_surface.get_all_market_data()[cross + ".close"][horizon_date[i]]) \ - + (float(fx_vol_surface.get_all_market_data()[cross + tenor + ".close"][horizon_date[i]]) \ - / self._fx_forwards_pricer.get_forwards_divisor(cross[3:6])) + strike[i] = float(fx_vol_surface.get_all_market_data()[ + cross + ".close"][ + horizon_date[i]]) \ + + (float( + fx_vol_surface.get_all_market_data()[ + cross + tenor + ".close"][horizon_date[i]]) \ + / self._fx_forwards_pricer.get_forwards_divisor( + cross[3:6])) # Interpolate vol later # TODO: work on 25d and 10d strikes - elif strike[i] == '25d-otm': - if 'call' in contract_type_: - strike[i] = fx_vol_surface.get_25d_call_strike(tenor) - - vol[i] = fx_vol_surface.get_25d_call_vol(tenor) / 100.0 - elif 'put' in contract_type_: - strike[i] = fx_vol_surface.get_25d_put_strike(tenor) - vol[i] = fx_vol_surface.get_25d_put_vol(tenor) / 100.0 - - elif strike[i] == '10d-otm': - if 'call' in contract_type_: - strike[i] = fx_vol_surface.get_10d_call_strike(tenor) - vol[i] = 
fx_vol_surface.get_10d_call_vol(tenor) / 100.0 - elif 'put' in contract_type_: - strike[i] = fx_vol_surface.get_10d_put_strike(tenor) - vol[i] = fx_vol_surface.get_10d_put_vol(tenor) / 100.0 - - if not(built_vol_surface): + elif strike[i] == "25d-otm": + if "call" in contract_type_: + strike[i] = fx_vol_surface.get_25d_call_strike( + tenor) + + vol[i] = fx_vol_surface.get_25d_call_vol( + tenor) / 100.0 + elif "put" in contract_type_: + strike[i] = fx_vol_surface.get_25d_put_strike( + tenor) + vol[i] = fx_vol_surface.get_25d_put_vol( + tenor) / 100.0 + + elif strike[i] == "10d-otm": + if "call" in contract_type_: + strike[i] = fx_vol_surface.get_10d_call_strike( + tenor) + vol[i] = fx_vol_surface.get_10d_call_vol( + tenor) / 100.0 + elif "put" in contract_type_: + strike[i] = fx_vol_surface.get_10d_put_strike( + tenor) + vol[i] = fx_vol_surface.get_10d_put_vol( + tenor) / 100.0 + + if not built_vol_surface: try: fx_vol_surface.build_vol_surface(horizon_date[i]) except: - logger.warn("Failed to build vol surface for " + str(horizon_date) + ", won't be able to interpolate vol") + logger.warn("Failed to build vol surface for " + str( + horizon_date) + ", won't be able to interpolate vol") # fx_vol_surface.extract_vol_surface(num_strike_intervals=None) # If an implied vol hasn't been provided, interpolate that one, fit the vol surface (if hasn't already been @@ -223,22 +254,29 @@ def _price_option(contract_type_, contract_type_fin_): if np.isnan(vol[i]): if tenor is None: - vol[i] = fx_vol_surface.calculate_vol_for_strike_expiry(strike[i], expiry_date=expiry_date[i], tenor=None) + vol[ + i] = fx_vol_surface.calculate_vol_for_strike_expiry( + strike[i], expiry_date=expiry_date[i], tenor=None) else: - vol[i] = fx_vol_surface.calculate_vol_for_strike_expiry(strike[i], expiry_date=None, tenor=tenor) + vol[ + i] = fx_vol_surface.calculate_vol_for_strike_expiry( + strike[i], expiry_date=None, tenor=tenor) - model = FinModelBlackScholes(float(vol[i])) + model = BlackScholes(float(vol[i])) - logger.info("Pricing " + contract_type_ + " option, horizon date = " + str(horizon_date[i]) + ", expiry date = " - + str(expiry_date[i])) + logger.info( + "Pricing " + contract_type_ + " option, horizon date = " + str( + horizon_date[i]) + ", expiry date = " + + str(expiry_date[i])) - option = FinFXVanillaOption(self._findate(expiry_date[i]), strike[i], - cross, contract_type_fin_, notional, cross[0:3]) + option = FXVanillaOption( + self._findate(expiry_date[i]), strike[i], + cross, contract_type_fin_, notional, cross[0:3]) spot[i] = fx_vol_surface.get_spot() """ FinancePy will return the value in the following dictionary for values - {'v': vdf, + {"v": vdf, "cash_dom": cash_dom, "cash_for": cash_for, "pips_dom": pips_dom, @@ -251,15 +289,30 @@ def _price_option(contract_type_, contract_type_fin_): "ccy_for": self._forName} """ - option_values[i] = option_values[i] + option.value(self._findate(horizon_date[i]), - spot[i], fx_vol_surface.get_dom_discount_curve(), - fx_vol_surface.get_for_discount_curve(), - model)[premium_output.replace('-', '_')] - - intrinsic_values[i] = intrinsic_values[i] + option.value(self._findate(expiry_date[i]), - spot[i], fx_vol_surface.get_dom_discount_curve(), + option_values[i] = option_values[i] + \ + option.value(self._findate(horizon_date[i]), + spot[i], + fx_vol_surface.get_dom_discount_curve(), fx_vol_surface.get_for_discount_curve(), - model)[premium_output.replace('-', '_')] + model)[ + premium_output.replace("-", "_")] + + # TODO instrinsic value calculations need checking! 
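For reference, a small worked example of the intrinsic-value logic added in the hunk below; the numbers are purely illustrative and are not taken from any market data in this patch. A call's intrinsic value is max(S - K, 0), a put's is max(K - S, 0), a straddle/strangle sums the two legs, and under the "pct-for" premium convention the value is expressed as a fraction of spot (ie. of the base-currency notional).

# Illustrative only - mirrors the intrinsic-value branches below
spot_i, strike_i = 0.7150, 0.7000                     # eg. AUDUSD at the horizon date
call_intrinsic = max(spot_i - strike_i, 0)            # 0.0150 for a european-call
put_intrinsic = max(strike_i - spot_i, 0)             # 0.0 for a european-put
straddle_intrinsic = call_intrinsic + put_intrinsic   # 0.0150 for a straddle/strangle
pct_for_intrinsic = call_intrinsic / spot_i           # ~0.0210 in "pct-for" terms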
+ if contract_type == "european-call": + intrinsic_values[i] = (max(spot[i] - strike[i], 0)) + elif contract_type == "european-put": + intrinsic_values[i] = (max(strike[i] - spot[i], 0)) + elif contract_type == "european-straddle" or contract_type == "european-strangle": + intrinsic_values[i] = (max(spot[i] - strike[i], 0) + max( + strike[i] - spot[i], 0)) + + if "pct-for" in premium_output: + intrinsic_values[i] = intrinsic_values[i] / spot[i] + + # intrinsic_values[i] = intrinsic_values[i] + option.value(self._findate(expiry_date[i]), + # spot[i], fx_vol_surface.get_dom_discount_curve(), + # fx_vol_surface.get_for_discount_curve(), + # model)[premium_output.replace("-", "_")] """FinancePy returns this dictionary for deltas {"pips_spot_delta": pips_spot_delta, @@ -268,39 +321,43 @@ def _price_option(contract_type_, contract_type_fin_): "pct_fwd_delta_prem_adj": pct_fwd_delta_prem_adj} """ - delta[i] = delta[i] + option.delta(self._findate(horizon_date[i]), - spot[i], fx_vol_surface.get_dom_discount_curve(), - fx_vol_surface.get_for_discount_curve(), model)[delta_output.replace('-', '_')] + delta[i] = delta[i] + \ + option.delta(self._findate(horizon_date[i]), + spot[i], + fx_vol_surface.get_dom_discount_curve(), + fx_vol_surface.get_for_discount_curve(), + model)[delta_output.replace('-', '_')] - if contract_type == 'european-call': - contract_type_fin = FinOptionTypes.EUROPEAN_CALL + if contract_type == "european-call": + contract_type_fin = OptionTypes.EUROPEAN_CALL _price_option(contract_type, contract_type_fin) - elif contract_type == 'european-put': - contract_type_fin = FinOptionTypes.EUROPEAN_PUT + elif contract_type == "european-put": + contract_type_fin = OptionTypes.EUROPEAN_PUT _price_option(contract_type, contract_type_fin) - elif contract_type == 'european-straddle' or contract_type == 'european-strangle': - contract_type = 'european-call' - contract_type_fin = FinOptionTypes.EUROPEAN_CALL + elif contract_type == "european-straddle" or contract_type == "european-strangle": + contract_type = "european-call" + contract_type_fin = OptionTypes.EUROPEAN_CALL _price_option(contract_type, contract_type_fin) - contract_type = 'european-put' - contract_type_fin = FinOptionTypes.EUROPEAN_PUT + contract_type = "european-put" + contract_type_fin = OptionTypes.EUROPEAN_PUT _price_option(contract_type, contract_type_fin) if return_as_df: option_prices_df = pd.DataFrame(index=horizon_date) - option_prices_df[cross + '-option-price.' + field] = option_values - option_prices_df[cross + '.' + field] = spot - option_prices_df[cross + '-strike.' + field] = strike - option_prices_df[cross + '-vol.' + field] = vol - option_prices_df[cross + '-delta.' + field] = delta - option_prices_df[cross + '.expiry-date'] = expiry_date - option_prices_df[cross + '-intrinsic-value.' + field] = intrinsic_values + option_prices_df[cross + "-option-price." + field] = option_values + option_prices_df[cross + "." + field] = spot + option_prices_df[cross + "-strike." + field] = strike + option_prices_df[cross + "-vol." + field] = vol + option_prices_df[cross + "-delta." + field] = delta + option_prices_df[cross + ".expiry-date"] = expiry_date + option_prices_df[ + cross + "-intrinsic-value." 
+ field] = intrinsic_values return option_prices_df @@ -314,5 +371,6 @@ def get_day_count_conv(self, currency): def _findate(self, timestamp): - return FinDate(timestamp.day, timestamp.month, timestamp.year, - hh=timestamp.hour, mm=timestamp.minute, ss=timestamp.second) \ No newline at end of file + return Date(timestamp.day, timestamp.month, timestamp.year, + hh=timestamp.hour, mm=timestamp.minute, + ss=timestamp.second) diff --git a/finmarketpy/curve/volatility/fxvolsurface.py b/finmarketpy/curve/volatility/fxvolsurface.py index f1242ff..3a928d0 100644 --- a/finmarketpy/curve/volatility/fxvolsurface.py +++ b/finmarketpy/curve/volatility/fxvolsurface.py @@ -1,47 +1,38 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. 
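For anyone updating their own code alongside this patch, the FinancePy 0.310 names used in the hunks above map onto the old pre-0.310 names as follows (a reference sketch only, assuming FinancePy 0.310 is installed):

# FinancePy 0.310 imports (new names on the left, old names in comments)
from financepy.utils.date import Date                                # was financepy.finutils.FinDate.FinDate
from financepy.utils.global_types import OptionTypes                 # was FinGlobalTypes.FinOptionTypes
from financepy.models.black_scholes import BlackScholes              # was FinModelBlackScholes
from financepy.products.fx.fx_vanilla_option import FXVanillaOption  # was FinFXVanillaOption

expiry = Date(1, 6, 2007)  # day, month, year - same argument order as the old FinDate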
# + import pandas as pd import numpy as np from financepy.market.curves.discount_curve_flat import DiscountCurveFlat from financepy.utils.date import Date -# tested with financepy 0.193 / 0.220 - -# Future versions of FinancePy will roll FXFinVolSurfacePlus into FinFXVolSurface - -try: - from financepy.market.volatility.fx_vol_surface_plus import \ - FXVolSurfacePlus \ - as FinFXVolSurface - from financepy.market.volatility.fx_vol_surface_plus import FinFXATMMethod - from financepy.market.volatility.fx_vol_surface_plus import \ - FinFXDeltaMethod - from financepy.market.volatility.fx_vol_surface_plus import vol_function - from financepy.market.volatility.fx_vol_surface_plus import \ - VolFunctionTypes -except: - from financepy.market.volatility.fx_vol_surface_plus import \ - FXVolSurface \ - as FinFXVolSurface - from financepy.market.volatility.fx_vol_surface_plus import FinFXATMMethod - from financepy.market.volatility.fx_vol_surface_plus import \ - FinFXDeltaMethod - from financepy.market.volatility.fx_vol_surface_plus import vol_function - from financepy.market.volatility.fx_vol_surface_plus import \ - VolFunctionTypes +# Tested with financepy 0.310 only + +from financepy.market.volatility.fx_vol_surface_plus import \ + FXVolSurfacePlus \ + as FinFXVolSurface # So there is no clash with FXVolSurface from finmarketpy +from financepy.market.volatility.fx_vol_surface_plus import FinFXATMMethod +from financepy.market.volatility.fx_vol_surface_plus import \ + FinFXDeltaMethod +from financepy.market.volatility.fx_vol_surface_plus import vol_function +from financepy.market.volatility.fx_vol_surface_plus import \ + VolFunctionTypes from financepy.utils.global_types import FinSolverTypes @@ -61,7 +52,7 @@ class FXVolSurface(AbstractVolSurface): """ - def __init__(self, market_df=None, asset=None, field='close', + def __init__(self, market_df=None, asset=None, field="close", tenors=market_constants.fx_options_tenor_for_interpolation, vol_function_type=market_constants.fx_options_vol_function_type, atm_method=market_constants.fx_options_atm_method, @@ -78,34 +69,34 @@ def __init__(self, market_df=None, asset=None, field='close', Market data with spot, FX volatility surface, FX forwards and base depos asset : str - Eg. 'EURUSD' + Eg. "EURUSD" field : str Market data field to use - default - 'close' + default - "close" tenors : str(list) Tenors to be used (we need to avoid tenors, where there might be NaNs) vol_function_type : str What type of interpolation scheme to use - default - 'CLARK5' (also 'CLARK', 'BBG' and 'SABR') + default - "CLARK5" (also "CLARK", "BBG" and "SABR") atm_method : str How is the ATM quoted? Eg. delta neutral, ATMF etc. - default - 'fwd-delta-neutral-premium-adj' + default - "fwd-delta-neutral-premium-adj" delta_method : str Spot delta, forward delta etc. - default - 'spot-delta' + default - "spot-delta" solver : str Which solver to use in FX vol surface calibration? - default - 'nelmer-mead' + default - "nelmer-mead" alpha : float Between 0 and 1 (default 0.5) @@ -128,7 +119,7 @@ def __init__(self, market_df=None, asset=None, field='close', for_name_base = asset[0:3] dom_name_terms = asset[3:6] - field = '.' + field + field = "." + field # CAREFUL: need to divide by 100 for depo rate, ie. 
0.0346 = 3.46% self._forCCRate = market_df[ @@ -148,45 +139,45 @@ def __init__(self, market_df=None, asset=None, field='close', self._risk_reversal10DeltaVols = market_df[ [asset + "10R" + t + field for t in tenors]].values - if vol_function_type == 'CLARK': + if vol_function_type == "CLARK": self._vol_function_type = VolFunctionTypes.CLARK - elif vol_function_type == 'CLARK5': + elif vol_function_type == "CLARK5": self._vol_function_type = VolFunctionTypes.CLARK5 - elif vol_function_type == 'BBG': + elif vol_function_type == "BBG": self._vol_function_type = VolFunctionTypes.BBG - # Note: currently SABR isn't fully implemented in FinancePy - elif vol_function_type == 'SABR': + # Note: currently SABR isn"t fully implemented in FinancePy + elif vol_function_type == "SABR": self._vol_function_type = VolFunctionTypes.SABR - elif vol_function_type == 'SABR3': + elif vol_function_type == "SABR3": self._vol_function_type = VolFunctionTypes.SABR3 # What does ATM mean? (for most - if atm_method == 'fwd-delta-neutral': # ie. strike such that a straddle would be delta neutral + if atm_method == "fwd-delta-neutral": # ie. strike such that a straddle would be delta neutral self._atm_method = FinFXATMMethod.FWD_DELTA_NEUTRAL - elif atm_method == 'fwd-delta-neutral-premium-adj': + elif atm_method == "fwd-delta-neutral-premium-adj": self._atm_method = FinFXATMMethod.FWD_DELTA_NEUTRAL_PREM_ADJ - elif atm_method == 'spot': # ATM is spot + elif atm_method == "spot": # ATM is spot self._atm_method = FinFXATMMethod.SPOT - elif atm_method == 'fwd': # ATM is forward + elif atm_method == "fwd": # ATM is forward self._atm_method = FinFXATMMethod.FWD # How are the deltas quoted? - if delta_method == 'spot-delta': + if delta_method == "spot-delta": self._delta_method = FinFXDeltaMethod.SPOT_DELTA - elif delta_method == 'fwd-delta': + elif delta_method == "fwd-delta": self._delta_method = FinFXDeltaMethod.FORWARD_DELTA - elif delta_method == 'spot-delta-prem-adj': + elif delta_method == "spot-delta-prem-adj": self._delta_method = FinFXDeltaMethod.SPOT_DELTA_PREM_ADJ - elif delta_method == 'fwd-delta-prem-adj': + elif delta_method == "fwd-delta-prem-adj": self._delta_method = FinFXDeltaMethod.FORWARD_DELTA_PREM_ADJ # Which solver to use in FX vol surface calibration - if solver == 'nelmer-mead': + if solver == "nelmer-mead": self._solver = FinSolverTypes.NELDER_MEAD - elif solver == 'nelmer-mead-numba': + elif solver == "nelmer-mead-numba": self._solver = FinSolverTypes.NELDER_MEAD_NUMBA - elif solver == 'cg': + elif solver == "cg": self._solver = FinSolverTypes.CONJUGATE_GRADIENT self._alpha = alpha @@ -208,12 +199,12 @@ def build_vol_surface(self, value_date): depo_tenor : str Depo tenor to use - default - '1M' + default - "1M" field : str Market data field to use - default - 'close' + default - "close" """ self._value_date = self._market_util.parse_date(value_date) @@ -234,31 +225,32 @@ def build_vol_surface(self, value_date): self._spot = float(self._spot_history[date_index][0]) # New implementation in FinancePy also uses 10d for interpolation - self._fin_fx_vol_surface = FinFXVolSurface(value_fin_date, - self._spot, - self._asset, - self._asset[0:3], - dom_discount_curve, - for_discount_curve, - self._tenors.copy(), - self._atm_vols[date_index][ - 0], - self._market_strangle25DeltaVols[ - date_index][0], - self._risk_reversal25DeltaVols[ - date_index][0], - self._market_strangle10DeltaVols[ - date_index][0], - self._risk_reversal10DeltaVols[ - date_index][0], - self._alpha, - atmMethod=self._atm_method, - 
deltaMethod=self._delta_method, - volatility_function_type=self._vol_function_type, - finSolverType=self._solver, - tol=self._tol) # TODO add tol - - def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor='1M'): + self._fin_fx_vol_surface = FinFXVolSurface( + value_fin_date, + self._spot, + self._asset, + self._asset[0:3], + dom_discount_curve, + for_discount_curve, + self._tenors.copy(), + self._atm_vols[date_index][ + 0], + self._market_strangle25DeltaVols[ + date_index][0], + self._risk_reversal25DeltaVols[ + date_index][0], + self._market_strangle10DeltaVols[ + date_index][0], + self._risk_reversal10DeltaVols[ + date_index][0], + self._alpha, + atmMethod=self._atm_method, + deltaMethod=self._delta_method, + volatility_function_type=self._vol_function_type, + finSolverType=self._solver, + tol=self._tol) # TODO add tol + + def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor="1M"): """Calculates the implied_vol volatility for a given strike and tenor (or expiry date, if specified). The expiry date/broken dates are intepolated linearly in variance space. @@ -273,7 +265,7 @@ def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor='1M'): tenor : str (optional) Tenor of option - default - '1M' + default - "1M" Returns ------- @@ -283,8 +275,12 @@ def calculate_vol_for_strike_expiry(self, K, expiry_date=None, tenor='1M'): if expiry_date is not None: expiry_date = self._findate( self._market_util.parse_date(expiry_date)) - return self._fin_fx_vol_surface.volatilityFromStrikeDate(K, - expiry_date) + try: + return self._fin_fx_vol_surface.vol_from_strike_dt( + K, expiry_date) + except: + return self._fin_fx_vol_surface.volatility_from_strike_date( + K, expiry_date) else: try: tenor_index = self._get_tenor_index(tenor) @@ -320,8 +316,9 @@ def calculate_vol_for_delta_expiry(self, delta_call, expiry_date=None): def extract_vol_surface(self, num_strike_intervals=60, low_K_pc=0.95, high_K_pc=1.05): - """Creates an interpolated implied vol surface which can be plotted (in strike space), and also in delta - space for key strikes (ATM, 25d call and put). Also for key strikes converts from delta to strike space. + """Creates an interpolated implied vol surface which can be plotted + (in strike space), and also in delta space for key strikes (ATM, 25d + call and put). Also for key strikes converts from delta to strike space. 
Parameters ---------- @@ -352,12 +349,12 @@ def extract_vol_surface(self, num_strike_intervals=60, low_K_pc=0.95, df_vol_surface_quoted_points = pd.DataFrame( columns=self._fin_fx_vol_surface._tenors) - # Note, at present we're not using 10d strikes - quoted_strikes_names = ['ATM', 'STR_25D_MS', 'RR_25D_P', 'STR_10D_MS', - 'RR_10D_P'] - key_strikes_names = ['K_10D_P', 'K_10D_P_MS', 'K_25D_P', 'K_25D_P_MS', - 'ATM', 'K_25D_C', 'K_25D_C_MS', 'K_10D_C', - 'K_10D_C_MS'] + # Note, at present we"re not using 10d strikes + quoted_strikes_names = ["ATM", "STR_25D_MS", "RR_25D_P", "STR_10D_MS", + "RR_10D_P"] + key_strikes_names = ["K_10D_P", "K_10D_P_MS", "K_25D_P", "K_25D_P_MS", + "ATM", "K_25D_C", "K_25D_C_MS", "K_10D_C", + "K_10D_C_MS"] # Get max/min strikes to interpolate (from the longest dated tenor) low_K = self._fin_fx_vol_surface._K_25D_P[-1] * low_K_pc @@ -445,14 +442,14 @@ def extract_vol_surface(self, num_strike_intervals=60, low_K_pc=0.95, index=key_strikes_names, data=key_vols) df_vol_dict = {} - df_vol_dict['vol_surface_implied_pdf'] = df_vol_surface_implied_pdf - df_vol_dict['vol_surface_strike_space'] = df_vol_surface_strike_space - df_vol_dict['vol_surface_delta_space'] = df_vol_surface_delta_space - df_vol_dict['vol_surface_delta_space_exc_ms'] = \ - df_vol_surface_delta_space[ - ~df_vol_surface_delta_space.index.str.contains('_MS')] - df_vol_dict['vol_surface_quoted_points'] = df_vol_surface_quoted_points - df_vol_dict['deltas_vs_strikes'] = df_deltas_vs_strikes + df_vol_dict["vol_surface_implied_pdf"] = df_vol_surface_implied_pdf + df_vol_dict["vol_surface_strike_space"] = df_vol_surface_strike_space + df_vol_dict["vol_surface_delta_space"] = df_vol_surface_delta_space + df_vol_dict["vol_surface_delta_space_exc_ms"] = \ + df_vol_surface_delta_space[ + ~df_vol_surface_delta_space.index.str.contains("_MS")] + df_vol_dict["vol_surface_quoted_points"] = df_vol_surface_quoted_points + df_vol_dict["deltas_vs_strikes"] = df_deltas_vs_strikes self._df_vol_dict = df_vol_dict @@ -502,31 +499,31 @@ def get_spot(self): return self._spot def get_atm_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['ATM'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["ATM"] def get_25d_call_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_25D_C'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_25D_C"] def get_25d_put_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_25D_P'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_25D_P"] def get_10d_call_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_10D_C'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_10D_C"] def get_10d_put_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_10D_P'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_10D_P"] def get_25d_call_ms_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_25D_C_MS'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_25D_C_MS"] def get_25d_put_ms_strike(self, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_25D_P_MS'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_25D_P_MS"] def get_10d_call_ms_strike(self, expiry_date=None, tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_10D_C_MS'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_10D_C_MS"] def get_10d_put_ms_strike(self, expiry_date=None, 
tenor=None): - return self._df_vol_dict['deltas_vs_strikes'][tenor]['K_10D_P_MS'] + return self._df_vol_dict["deltas_vs_strikes"][tenor]["K_10D_P_MS"] def get_atm_quoted_vol(self, tenor): """The quoted ATM vol from the market (ie. which has NOT been obtained from build vol surface) @@ -544,35 +541,35 @@ def get_atm_quoted_vol(self, tenor): self._get_tenor_index(tenor)] def get_atm_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor]['ATM'] + return self._df_vol_dict["vol_surface_delta_space"][tenor]["ATM"] def get_25d_call_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor]['K_25D_C'] + return self._df_vol_dict["vol_surface_delta_space"][tenor]["K_25D_C"] def get_25d_put_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor]['K_25D_P'] + return self._df_vol_dict["vol_surface_delta_space"][tenor]["K_25D_P"] def get_25d_call_ms_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor][ - 'K_25D_C_MS'] + return self._df_vol_dict["vol_surface_delta_space"][tenor][ + "K_25D_C_MS"] def get_25d_put_ms_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor][ - 'K_25D_P_MS'] + return self._df_vol_dict["vol_surface_delta_space"][tenor][ + "K_25D_P_MS"] def get_10d_call_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor]['K_10D_C'] + return self._df_vol_dict["vol_surface_delta_space"][tenor]["K_10D_C"] def get_10d_put_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor]['K_10D_P'] + return self._df_vol_dict["vol_surface_delta_space"][tenor]["K_10D_P"] def get_10d_call_ms_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor][ - 'K_10D_C_MS'] + return self._df_vol_dict["vol_surface_delta_space"][tenor][ + "K_10D_C_MS"] def get_10d_put_ms_vol(self, tenor=None): - return self._df_vol_dict['vol_surface_delta_space'][tenor][ - 'K_10D_P_MS'] + return self._df_vol_dict["vol_surface_delta_space"][tenor][ + "K_10D_P_MS"] def get_dom_discount_curve(self): return self._dom_discount_curve diff --git a/finmarketpy/util/marketconstants.py b/finmarketpy/util/marketconstants.py index 86f0996..bf3de20 100644 --- a/finmarketpy/util/marketconstants.py +++ b/finmarketpy/util/marketconstants.py @@ -89,21 +89,21 @@ class MarketConstants(object): fx_options_tenor_for_interpolation = ["ON", "1W", "2W", "3W", "1M", "2M", "3M", "4M", "6M", "9M", "1Y"]#, "2Y", "3Y"] # What contract will we generally be trading? - fx_options_trading_tenor = '1M' + fx_options_trading_tenor = "1M" # When constructing total return index 'mult' or 'add' - fx_options_cum_index = 'mult' + fx_options_cum_index = "mult" # For total return index use option price in base currency/for - fx_options_index_premium_output = 'pct-for' + fx_options_index_premium_output = "pct-for" - fx_options_index_strike = 'atm' - fx_options_index_contract_type = 'european-call' + fx_options_index_strike = "atm" + fx_options_index_contract_type = "european-call" fx_options_freeze_implied_vol = False # What is the point at which we roll? - fx_options_roll_event = 'expiry-date' # 'month-end', 'expiry-date', 'no-roll' + fx_options_roll_event = "expiry-date" # 'month-end', 'expiry-date', 'no-roll' # How many days before that point should we roll? 
fx_options_roll_days_before = 5 @@ -114,23 +114,23 @@ class MarketConstants(object): # For fitting vol surface # 'CLARK5', 'CLARK', 'BBG', 'SABR' and 'SABR3' - fx_options_vol_function_type = 'CLARK5' - fx_options_depo_tenor = '1M' + fx_options_vol_function_type = "CLARK5" + fx_options_depo_tenor = "1M" # 'fwd-delta-neutral' or 'fwd-delta-neutral-premium-adj' or 'spot' or 'fwd' - fx_options_atm_method = 'fwd-delta-neutral-premium-adj' + fx_options_atm_method = "fwd-delta-neutral-premium-adj" # 'fwd-delta' or 'fwd-delta-prem-adj' or 'spot-delta-prem-adj' or 'spot-delta' - fx_options_delta_method = 'spot-delta-prem-adj' + fx_options_delta_method = "spot-delta-prem-adj" fx_options_alpha = 0.5 # 'pct-for' (in base currency pct) or 'pct-dom' (in terms currency pct) - fx_options_premium_output = 'pct-for' - fx_options_delta_output = 'pct-fwd-delta-prem-adj' + fx_options_premium_output = "pct-for" + fx_options_delta_output = "pct-fwd-delta-prem-adj" # 'nelmer-mead' or 'nelmer-mead-numba' (faster but less accurate) or 'cg' (conjugate gradient tends to be slower, but more accurate) - fx_options_solver = 'nelmer-mead-numba' - fx_options_pricing_engine = 'financepy' # 'financepy' or 'finmarketpy' + fx_options_solver = "nelmer-mead-numba" + fx_options_pricing_engine = "financepy" # 'financepy' or 'finmarketpy' fx_options_tol = 1e-8 diff --git a/finmarketpy/util/marketutil.py b/finmarketpy/util/marketutil.py index 41b48e9..1216c0e 100644 --- a/finmarketpy/util/marketutil.py +++ b/finmarketpy/util/marketutil.py @@ -24,19 +24,19 @@ def parse_date(self, date): date1 = datetime.datetime.utcnow() - if date is 'midnight': + if date == "midnight": date1 = datetime.datetime(date1.year, date1.month, date1.day, 0, 0, 0) - elif date is 'decade': + elif date == "decade": date1 = date1 - timedelta(days=365 * 10) - elif date is 'year': + elif date == "year": date1 = date1 - timedelta(days=365) - elif date is 'month': + elif date == "month": date1 = date1 - timedelta(days=30) - elif date is 'week': + elif date == "week": date1 = date1 - timedelta(days=7) - elif date is 'day': + elif date == "day": date1 = date1 - timedelta(days=1) - elif date is 'hour': + elif date == "hour": date1 = date1 - timedelta(hours=1) else: # format expected 'Jun 1 2005 01:33', '%b %d %Y %H:%M' diff --git a/finmarketpy_examples/fx_forwards_indices_examples.py b/finmarketpy_examples/fx_forwards_indices_examples.py index 76b4ad3..aab7428 100644 --- a/finmarketpy_examples/fx_forwards_indices_examples.py +++ b/finmarketpy_examples/fx_forwards_indices_examples.py @@ -1,19 +1,23 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # """ -Shows how to use finmarketpy to create total return indices for FX forwards with appropriate roll rules +Shows how to use finmarketpy to create total return indices for FX forwards +with appropriate roll rules """ import pandas as pd @@ -29,151 +33,191 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='plotly') +chart = Chart(engine="plotly") market = Market(market_data_generator=MarketDataGenerator()) calculations = Calculations() # Choose run_example = 0 for everything -# run_example = 1 - creating USDBRL total return index rolling forwards and compare with BBG indices -# run_example = 2 - creating AUDJPY (via AUDUSD and JPYUSD) total return index rolling forwards & compare with BBG indices +# run_example = 1 - creating USDBRL total return index rolling forwards and +# compare with BBG indices +# run_example = 2 - creating AUDJPY (via AUDUSD and JPYUSD) total return index +# rolling forwards & compare with BBG indices run_example = 0 from finmarketpy.curve.fxforwardscurve import FXForwardsCurve ###### Create total return indices plot for USDBRL using forwards -# We shall be using USDBRL 1M forward contracts and rolling them 5 business days before month end +# We shall be using USDBRL 1M forward contracts and rolling them 5 business +# days before month end if run_example == 1 or run_example == 0: - cross = 'USDBRL' + cross = "USDBRL" # Download more tenors - fx_forwards_tenors = ['1W', '1M', '2M', '3M'] + fx_forwards_tenors = ["1W", "1M", "2M", "3M"] # Get USDBRL data for spot, forwards + depos - md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007', - data_source='bloomberg', cut='NYC', category='fx-forwards-market', + md_request = MarketDataRequest(start_date="02 Jan 2007", + finish_date="01 Jun 2007", + data_source="bloomberg", cut="NYC", + category="fx-forwards-market", tickers=cross, fx_forwards_tenor=fx_forwards_tenors, - base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") # In case any missing values fill down (particularly can get this for NDFs) - df_market = market.fetch_market(md_request=md_request).fillna(method='ffill') + df_market = market.fetch_market(md_request=md_request).fillna( + method="ffill") fx_forwards_curve = FXForwardsCurve() - # Let's trade a 1M forward, and we roll 5 business days (based on both base + terms currency holidays) + # Let"s trade a 1M forward, and we roll 5 business days (based on both + # base + terms currency holidays) # before month end - df_cuemacro_tot_1M = fx_forwards_curve.construct_total_return_index(cross, df_market, - fx_forwards_trading_tenor='1M', - roll_days_before=5, - roll_event='month-end', - roll_months=1, - fx_forwards_tenor_for_interpolation=fx_forwards_tenors, - output_calculation_fields=True) - - df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns] - - # Now do a 3M forward, and we roll 5 business days before end of quarter(based on both base + terms currency holidays) + 
df_cuemacro_tot_1M = fx_forwards_curve.construct_total_return_index( + cross, + df_market, + fx_forwards_trading_tenor="1M", + roll_days_before=5, + roll_event="month-end", + roll_months=1, + fx_forwards_tenor_for_interpolation=fx_forwards_tenors, + output_calculation_fields=True) + + df_cuemacro_tot_1M.columns = [ + x.replace("forward-tot", "forward-tot-1M-cuemacro") for x in + df_cuemacro_tot_1M.columns] + + # Now do a 3M forward, and we roll 5 business days before end of quarter( + # based on both base + terms currency holidays) # before month end - df_cuemacro_tot_3M = fx_forwards_curve.construct_total_return_index(cross, df_market, - fx_forwards_trading_tenor='3M', - roll_days_before=5, - roll_event='month-end', - roll_months=3, - fx_forwards_tenor_for_interpolation=fx_forwards_tenors, - output_calculation_fields=True) - - df_cuemacro_tot_3M.columns = [x.replace('forward-tot', 'forward-tot-3M-cuemacro') for x in df_cuemacro_tot_3M.columns] + df_cuemacro_tot_3M = fx_forwards_curve.construct_total_return_index( + cross, + df_market, + fx_forwards_trading_tenor="3M", + roll_days_before=5, + roll_event="month-end", + roll_months=3, + fx_forwards_tenor_for_interpolation=fx_forwards_tenors, + output_calculation_fields=True) + + df_cuemacro_tot_3M.columns = [ + x.replace("forward-tot", "forward-tot-3M-cuemacro") for x in + df_cuemacro_tot_3M.columns] # Get spot data md_request.abstract_curve = None - md_request.category = 'fx' + md_request.category = "fx" df_spot = market.fetch_market(md_request=md_request) - df_spot.columns = [x + '-spot' for x in df_spot.columns] + df_spot.columns = [x + "-spot" for x in df_spot.columns] # Get Bloomberg calculated total return indices (for spot) - md_request.category = 'fx-tot' + md_request.category = "fx-tot" df_bbg_tot = market.fetch_market(md_request) - df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns] + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] # Get Bloomberg calculated total return indices (for 1M forwards rolled) - md_request.category = 'fx-tot-forwards' + md_request.category = "fx-tot-forwards" df_bbg_tot_forwards = market.fetch_market(md_request) - df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns] - - # Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg - # indices relatively well (both from spot and forwards). Also note the large difference with spot indices - # CAREFUL to fill down, before reindexing because forwards indices are likely to have different publishing dates - df = calculations.join([pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']), - pd.DataFrame(df_cuemacro_tot_3M[cross + '-forward-tot-3M-cuemacro.close']), - df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill') + df_bbg_tot_forwards.columns = [x + "-bbg" for x in + df_bbg_tot_forwards.columns] + + # Combine into a single data frame and plot, we note that the Cuemacro + # constructed indices track the Bloomberg + # indices relatively well (both from spot and forwards). 
Also note the large + # difference with spot indices + # CAREFUL to fill down, before reindexing because forwards indices are + # likely to have different publishing dates + df = calculations.join([pd.DataFrame( + df_cuemacro_tot_1M[cross + "-forward-tot-1M-cuemacro.close"]), + pd.DataFrame(df_cuemacro_tot_3M[ + cross + "-forward-tot-3M-cuemacro.close"]), + df_bbg_tot, df_spot, df_bbg_tot_forwards], + how="outer").fillna(method="ffill") df = calculations.create_mult_index_from_prices(df) chart.plot(df) -###### Create total return indices plot for AUDJPY using the underlying USD legs (ie. AUDUSD & JPYUSD) +###### Create total return indices plot for AUDJPY using the underlying USD +# legs (ie. AUDUSD & JPYUSD) if run_example == 2 or run_example == 0: - cross = 'AUDJPY' + cross = "AUDJPY" # Download more tenors - fx_forwards_tenors = ['1W', '1M', '2M', '3M'] + fx_forwards_tenors = ["1W", "1M", "2M", "3M"] # Parameters for how to construct total return indices, and the rolling rule # 1M forward contract, and roll it 5 working days before month end - # We'll be constructing our total return index from AUDUSD and JPYUSD - fx_forwards_curve = FXForwardsCurve(fx_forwards_trading_tenor='1M', - roll_days_before=5, - roll_event='month-end', construct_via_currency='USD', - fx_forwards_tenor_for_interpolation=fx_forwards_tenors, - roll_months=1, - output_calculation_fields=True) - - # Get AUDJPY (AUDUSD and JPYUSD) data for spot, forwards + depos and also construct the total returns forward index - md_request = MarketDataRequest(start_date='02 Jan 2007', finish_date='01 Jun 2007', - data_source='bloomberg', cut='NYC', category='fx', + # We"ll be constructing our total return index from AUDUSD and JPYUSD + fx_forwards_curve = FXForwardsCurve( + fx_forwards_trading_tenor="1M", + roll_days_before=5, + roll_event="month-end", + construct_via_currency="USD", + fx_forwards_tenor_for_interpolation=fx_forwards_tenors, + roll_months=1, + output_calculation_fields=True) + + # Get AUDJPY (AUDUSD and JPYUSD) data for spot, forwards + depos and also + # construct the total returns forward index + md_request = MarketDataRequest(start_date="02 Jan 2007", + finish_date="01 Jun 2007", + data_source="bloomberg", cut="NYC", + category="fx", tickers=cross, fx_forwards_tenor=fx_forwards_tenors, - base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return', + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return", abstract_curve=fx_forwards_curve) # In case any missing values fill down (particularly can get this for NDFs) - df_cuemacro_tot_1M = market.fetch_market(md_request=md_request).fillna(method='ffill') + df_cuemacro_tot_1M = market.fetch_market(md_request=md_request).fillna( + method="ffill") fx_forwards_curve = FXForwardsCurve() - df_cuemacro_tot_1M.columns = [x.replace('forward-tot', 'forward-tot-1M-cuemacro') for x in df_cuemacro_tot_1M.columns] + df_cuemacro_tot_1M.columns = [ + x.replace("forward-tot", "forward-tot-1M-cuemacro") for x in + df_cuemacro_tot_1M.columns] # Get spot data md_request.abstract_curve = None - md_request.category = 'fx' + md_request.category = "fx" df_spot = market.fetch_market(md_request=md_request) - df_spot.columns = [x + '-spot' for x in df_spot.columns] + df_spot.columns = [x + "-spot" for x in df_spot.columns] # Get Bloomberg calculated total return indices (for spot) - md_request.category = 'fx-tot' + md_request.category = "fx-tot" df_bbg_tot = market.fetch_market(md_request) - df_bbg_tot.columns = [x + '-bbg' for x in 
df_bbg_tot.columns] + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] # Get Bloomberg calculated total return indices (for 1M forwards rolled) - md_request.category = 'fx-tot-forwards' + md_request.category = "fx-tot-forwards" df_bbg_tot_forwards = market.fetch_market(md_request) - df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns] - - # Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg - # indices relatively well (both from spot and forwards). Also note the large difference with spot indices - # CAREFUL to fill down, before reindexing because forwards indices are likely to have different publishing dates - df = calculations.join([pd.DataFrame(df_cuemacro_tot_1M[cross + '-forward-tot-1M-cuemacro.close']), - df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill') + df_bbg_tot_forwards.columns = [x + "-bbg" for x in + df_bbg_tot_forwards.columns] + + # Combine into a single data frame and plot, we note that the Cuemacro + # constructed indices track the Bloomberg + # indices relatively well (both from spot and forwards). Also note the + # large difference with spot indices + # CAREFUL to fill down, before reindexing because forwards indices are + # likely to have different publishing dates + df = calculations.join([pd.DataFrame( + df_cuemacro_tot_1M[cross + "-forward-tot-1M-cuemacro.close"]), + df_bbg_tot, df_spot, df_bbg_tot_forwards], + how="outer").fillna(method="ffill") df = calculations.create_mult_index_from_prices(df) - chart.plot(df) \ No newline at end of file + chart.plot(df) diff --git a/finmarketpy_examples/fx_forwards_pricing_examples.py b/finmarketpy_examples/fx_forwards_pricing_examples.py index 51761e8..1760000 100644 --- a/finmarketpy_examples/fx_forwards_pricing_examples.py +++ b/finmarketpy_examples/fx_forwards_pricing_examples.py @@ -1,19 +1,24 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # + """ -Shows how to use finmarketpy to create total return indices for FX spot (ie. calculates spot returns + carry returns) +Shows how to use finmarketpy to create total return indices for FX spot (ie. 
+calculates spot returns + carry returns) """ import pandas as pd @@ -37,7 +42,7 @@ # run_example = 1 - get forwards prices for AUDUSD interpolated for an odd date/broken date # run_example = 2 - get implied deposit rate -run_example = 2 +run_example = 0 from finmarketpy.curve.rates.fxforwardspricer import FXForwardsPricer diff --git a/finmarketpy_examples/fx_options_indices_examples.py b/finmarketpy_examples/fx_options_indices_examples.py index 0667b70..54486d2 100644 --- a/finmarketpy_examples/fx_options_indices_examples.py +++ b/finmarketpy_examples/fx_options_indices_examples.py @@ -1,23 +1,30 @@ -__author__ = 'saeedamen' +__author__ = "saeedamen" # # Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # -""" -Shows how to use finmarketpy to total return indices for FX vanilla options (uses FinancePy underneath), so we can -see the historical P&L from for example, rolling a 1M call option etc. -Note, you will need to have a Bloomberg terminal (with blpapi Python library) to download the FX market data in order -to generate the FX option prices, which are used underneath (FX spot, FX forwards, FX implied volatility quotes and deposit rates) +""" +Shows how to use finmarketpy to total return indices for FX vanilla options +(uses FinancePy underneath), so we can see the historical P&L from for +example, rolling a 1M call option etc. 
+ +Note, you will need to have a Bloomberg terminal (with blpapi Python library) +to download the FX market data in order to generate the FX option prices, +which are used underneath (FX spot, FX forwards, FX implied volatility quotes +and deposit rates) """ import pandas as pd @@ -38,217 +45,286 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='plotly') +chart = Chart(engine="plotly") market = Market(market_data_generator=MarketDataGenerator()) # Choose run_example = 0 for everything -# run_example = 1 - create total return index AUDUSD 1M long calls (and separately long puts) over 2008 financial crisis and further -# run_example = 2 - create total return index USDJPY 1W short straddles over a long sample +# run_example = 1 - create total return index AUDUSD 1M long calls (and +# separately long puts) over 2008 financial crisis and further +# run_example = 2 - create total return index USDJPY 1W short straddles +# over a long sample run_example = 0 -def prepare_indices(cross, df_option_tot=None, df_option_tc=None, df_spot_tot=None): + +def prepare_indices(cross, df_option_tot=None, df_option_tc=None, + df_spot_tot=None): df_list = [] if df_option_tot is not None: - df_list.append(pd.DataFrame(df_option_tot[cross + '-option-tot.close'])) - df_list.append(pd.DataFrame(df_option_tot[cross + '-option-delta-tot.close'])) - df_list.append(pd.DataFrame(df_option_tot[cross + '-delta-pnl-index.close'])) + df_list.append( + pd.DataFrame(df_option_tot[cross + "-option-tot.close"])) + df_list.append( + pd.DataFrame(df_option_tot[cross + "-option-delta-tot.close"])) + df_list.append( + pd.DataFrame(df_option_tot[cross + "-delta-pnl-index.close"])) if df_option_tc is not None: - df_list.append(pd.DataFrame(df_option_tc[cross + '-option-tot-with-tc.close'])) - df_list.append(pd.DataFrame(df_option_tc[cross + '-option-delta-tot-with-tc.close'])) - df_list.append(pd.DataFrame(df_option_tc[cross + '-delta-pnl-index-with-tc.close'])) + df_list.append( + pd.DataFrame(df_option_tc[cross + "-option-tot-with-tc.close"])) + df_list.append(pd.DataFrame( + df_option_tc[cross + "-option-delta-tot-with-tc.close"])) + df_list.append(pd.DataFrame( + df_option_tc[cross + "-delta-pnl-index-with-tc.close"])) if df_spot_tot is not None: df_list.append(df_spot_tot) - df = calculations.join(df_list, how='outer').fillna(method='ffill') + df = calculations.join(df_list, how="outer").fillna(method="ffill") return calculations.create_mult_index_from_prices(df) -if __name__ == '__main__': - ###### Fetch market data for pricing AUDUSD options in 2007 (ie. FX spot, FX forwards, FX deposits and FX vol quotes) - ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation - ###### Enters a long 1M call, and MTM every day, and at expiry rolls into another long 1M call - if run_example == 1 or run_example == 0: +if __name__ == "__main__": - # Warning make sure you choose dates, where there is full vol surface! If points are missing interpolation + # Fetch market data for pricing AUDUSD options in 2007 (ie. FX spot, + # FX forwards, FX deposits and FX vol quotes) + # Construct volatility surface using FinancePy library underneath, + # using polynomial interpolation + # Enters a long 1M call, and MTM every day, and at expiry rolls into another long 1M call + if run_example == 1 or run_example == 0: + # Warning make sure you choose dates, where there is full vol surface! 
+ # If points are missing interpolation # will fail - start_date = '01 Jan 2007'; finish_date = '31 Dec 2020' # Use smaller window for quicker execution - - cross = 'AUDUSD' - fx_options_trading_tenor = '1M' - - # Download the whole all market data for AUDUSD for pricing options (FX vol surface + spot + FX forwards + depos) - md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date, - data_source='bloomberg', cut='BGN', category='fx-vol-market', - tickers=cross, fx_vol_tenor=['1W', '1M', '3M'], - cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]]) + start_date = "01 Jan 2007"; + finish_date = "31 Dec 2020" # Use smaller window for quicker execution + + cross = "AUDUSD" + fx_options_trading_tenor = "1M" + + # Download the whole all market data for AUDUSD for pricing options + # (FX vol surface + spot + FX forwards + depos) + md_request = MarketDataRequest(start_date=start_date, + finish_date=finish_date, + data_source="bloomberg", cut="BGN", + category="fx-vol-market", + tickers=cross, + fx_vol_tenor=["1W", "1M", "3M"], + cache_algo="cache_algo_return", + base_depos_currencies=[cross[0:3], + cross[3:6]]) df_vol_market = market.fetch_market(md_request) - df_vol_market = df_vol_market.fillna(method='ffill') + df_vol_market = df_vol_market.fillna(method="ffill") - # Remove New Year's Day and Christmas - df_vol_market = Filter().filter_time_series_by_holidays(df_vol_market, cal='FX') + # Remove New Year"s Day and Christmas + df_vol_market = Filter().filter_time_series_by_holidays(df_vol_market, + cal="FX") # Get a total return index for trading spot - # This way we can take into account carry when calculating delta hedging P&L - md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date, - data_source='bloomberg', cut='NYC', category='fx-tot', + # This way we can take into account carry when calculating delta + # hedging P&L + md_request = MarketDataRequest(start_date=start_date, + finish_date=finish_date, + data_source="bloomberg", cut="NYC", + category="fx-tot", tickers=cross, - cache_algo='cache_algo_return') + cache_algo="cache_algo_return") df_tot = market.fetch_market(md_request) - df_vol_market = df_vol_market.join(df_tot, how='left') - df_vol_market = df_vol_market.fillna(method='ffill') + df_vol_market = df_vol_market.join(df_tot, how="left") + df_vol_market = df_vol_market.fillna(method="ffill") # We want to roll long 1M ATM call at expiry - # We'll mark to market the option price through the month by interpolating between 1W and 1M (and using whole vol curve + # We"ll mark to market the option price through the month by + # interpolating between 1W and 1M (and using whole vol curve # at each tenor) - fx_options_curve = FXOptionsCurve(fx_options_trading_tenor=fx_options_trading_tenor, + fx_options_curve = FXOptionsCurve( + fx_options_trading_tenor=fx_options_trading_tenor, roll_days_before=0, - roll_event='expiry-date', + roll_event="expiry-date", roll_months=1, - fx_options_tenor_for_interpolation=['1W', '1M'], - strike='atm', - contract_type='european-call', - depo_tenor_for_option='1M', + fx_options_tenor_for_interpolation=["1W", "1M"], + strike="atm", + contract_type="european-call", + depo_tenor_for_option="1M", position_multiplier=1.0, - tot_label='tot', + tot_label="tot", output_calculation_fields=True) - # Let's trade a long 1M call, and we roll at expiry - df_cuemacro_option_call_tot = fx_options_curve.construct_total_return_index(cross, df_vol_market) + # Let"s trade a long 1M call, and we roll at expiry + 
df_cuemacro_option_call_tot = fx_options_curve.construct_total_return_index( + cross, df_vol_market) - # Add transaction costs to the option index (bid/ask bp for the option premium and spot FX) - df_cuemacro_option_call_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_call_tot, - option_tc_bp=5, spot_tc_bp=2) + # Add transaction costs to the option index (bid/ask bp for the option + # premium and spot FX) + df_cuemacro_option_call_tc = fx_options_curve.apply_tc_signals_to_total_return_index( + cross, df_cuemacro_option_call_tot, + option_tc_bp=5, spot_tc_bp=2) - # Let's trade a long 1M OTM put, and we roll at expiry + # Let"s trade a long 1M OTM put, and we roll at expiry df_cuemacro_option_put_tot = fx_options_curve.construct_total_return_index( - cross, df_vol_market, contract_type='european-put', strike='10d-otm', position_multiplier=1.0) - - # Add transaction costs to the option index (bid/ask bp for the option premium and spot FX) - df_cuemacro_option_put_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_put_tot, - option_tc_bp=5, spot_tc_bp=2) + cross, df_vol_market, contract_type="european-put", + strike="10d-otm", position_multiplier=1.0) + # Add transaction costs to the option index (bid/ask bp for the option + # premium and spot FX) + df_cuemacro_option_put_tc = fx_options_curve.apply_tc_signals_to_total_return_index( + cross, df_cuemacro_option_put_tot, + option_tc_bp=5, spot_tc_bp=2) # Get total returns for spot - df_bbg_tot = df_tot # from earlier! - df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns] + df_bbg_tot = df_tot # from earlier! + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] # Calculate a hedged portfolio of spot + 2*options (can we reduce drawdowns?) 
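A quick numerical illustration of the overlay arithmetic in the block below (figures are made up): both total-return indices are converted to daily percentage returns and summed, so on a day when spot sells off, the long put leg claws back part of the loss.

# Illustrative only - not taken from the data above
spot_ret = -0.010                  # spot total-return index falls 1.0% on the day
put_ret = 0.006                    # long OTM put overlay gains 0.6% the same day
hedged_ret = spot_ret + put_ret    # -0.4% for the combined hedged series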
calculations = Calculations() ret_stats = RetStats() - df_hedged = calculations.join([df_bbg_tot[cross + '-tot.close-bbg'].to_frame(), df_cuemacro_option_put_tc[cross + '-option-tot-with-tc.close'].to_frame()], how='outer') - df_hedged = df_hedged.fillna(method='ffill') + df_hedged = calculations.join( + [df_bbg_tot[cross + "-tot.close-bbg"].to_frame(), + df_cuemacro_option_put_tc[ + cross + "-option-tot-with-tc.close"].to_frame()], how="outer") + df_hedged = df_hedged.fillna(method="ffill") df_hedged = df_hedged.pct_change() - df_hedged['Spot + 2*option put hedge'] = df_hedged[cross + '-tot.close-bbg'] + df_hedged[cross + '-option-tot-with-tc.close'] + df_hedged["Spot + 2*option put hedge"] = df_hedged[ + cross + "-tot.close-bbg"] + \ + df_hedged[ + cross + "-option-tot-with-tc.close"] - df_hedged.columns = RetStats(returns_df=df_hedged, ann_factor=252).summary() + df_hedged.columns = RetStats(returns_df=df_hedged, + ann_factor=252).summary() # Plot everything # P&L from call chart.plot(calculations.create_mult_index_from_prices( - prepare_indices(cross=cross, df_option_tot=df_cuemacro_option_call_tot, - df_option_tc=df_cuemacro_option_call_tc, df_spot_tot=df_bbg_tot))) + prepare_indices(cross=cross, + df_option_tot=df_cuemacro_option_call_tot, + df_option_tc=df_cuemacro_option_call_tc, + df_spot_tot=df_bbg_tot))) # P&L from put option, put option + TC and total returns from spot chart.plot(calculations.create_mult_index_from_prices( - prepare_indices(cross=cross,df_option_tot=df_cuemacro_option_put_tot, - df_option_tc=df_cuemacro_option_put_tc, df_spot_tot=df_bbg_tot))) + prepare_indices(cross=cross, + df_option_tot=df_cuemacro_option_put_tot, + df_option_tc=df_cuemacro_option_put_tc, + df_spot_tot=df_bbg_tot))) # P&L from put option + TC and total returns from spot chart.plot(calculations.create_mult_index_from_prices( - prepare_indices(cross=cross, df_option_tc=df_cuemacro_option_put_tc, df_spot_tot=df_bbg_tot))) + prepare_indices(cross=cross, + df_option_tc=df_cuemacro_option_put_tc, + df_spot_tot=df_bbg_tot))) - # P&L for total returns from spot and total returns from + 2*put option + TC (ie. hedged portfolio) + # P&L for total returns from spot and total returns from + # + 2*put option + TC (ie. hedged portfolio) chart.plot(calculations.create_mult_index(df_hedged)) # Plot delta from put option - chart.plot(df_cuemacro_option_put_tot[cross + '-delta.close']) - - - ###### Fetch market data for pricing EURUSD options from 2006-2020 (ie. FX spot, FX forwards, FX deposits and FX vol quotes) - ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation - ###### Enters a short 1W straddle, and MTM every day, and at expiry rolls into another 1W straddle + chart.plot(df_cuemacro_option_put_tot[cross + "-delta.close"]) + + # Fetch market data for pricing EURUSD options from 2006-2020 (ie. + # FX spot, FX forwards, FX deposits and FX vol quotes) + # Construct volatility surface using FinancePy library underneath, + # using polynomial interpolation + # Enters a short 1W straddle, and MTM every day, and at expiry rolls + # into another 1W straddle if run_example == 2 or run_example == 0: - - # Warning make sure you choose dates, where there is full vol surface! If vol points in the tenors you are looking at + # Warning make sure you choose dates, where there is full vol surface! + # If vol points in the tenors you are looking at # are missing then interpolation will fail (or if eg. spot data is missing etc.) 
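One way to sanity-check the downloaded market data before building the vol surface is sketched below; this is only a suggested pandas check, not part of the patch (df is the DataFrame returned by market.fetch_market further down).

# Suggested pre-check (not part of this patch): flag dates with missing surface inputs
missing_dates = df[df.isna().any(axis=1)].index
if len(missing_dates) > 0:
    print("Vol surface inputs missing on:", list(missing_dates))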
- start_date = '08 Mar 2007'; finish_date = '31 Dec 2020' # Monday - # start_date = '09 Mar 2007'; finish_date = '31 Dec 2014' - # start_date = '04 Jan 2006'; finish_date = '31 Dec 2008' - # start_date = '01 Jan 2007'; finish_date = '31 Dec 2007' # Use smaller window for quicker execution - - cross = 'USDJPY' - fx_options_trading_tenor = '1W' # Try changing between 1W and 1M! - - # Download the whole all market data for USDJPY for pricing options (FX vol surface + spot + FX forwards + depos) - md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date, - data_source='bloomberg', cut='10AM', category='fx-vol-market', - tickers=cross, fx_vol_tenor=['1W', '1M'], base_depos_tenor=['1W', '1M'], - cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]]) + start_date = "08 Mar 2007"; + finish_date = "31 Dec 2020" # Monday + # start_date = "09 Mar 2007"; finish_date = "31 Dec 2014" + # start_date = "04 Jan 2006"; finish_date = "31 Dec 2008" + # start_date = "01 Jan 2007"; finish_date = "31 Dec 2007" # Use smaller window for quicker execution + + cross = "USDJPY" + fx_options_trading_tenor = "1W" # Try changing between 1W and 1M! + + # Download the whole all market data for USDJPY for pricing options + # (FX vol surface + spot + FX forwards + depos) + md_request = MarketDataRequest(start_date=start_date, + finish_date=finish_date, + data_source="bloomberg", cut="10AM", + category="fx-vol-market", + tickers=cross, + fx_vol_tenor=["1W", "1M"], + base_depos_tenor=["1W", "1M"], + cache_algo="cache_algo_return", + base_depos_currencies=[cross[0:3], + cross[3:6]]) df = market.fetch_market(md_request) - # Fill data for every workday and use weekend calendar (note: this is a bit of a fudge, filling down) - # CHECK DATA isn't missing at start of series - df = df.resample('B').last().fillna(method='ffill') - df = df[df.index >= '09 Mar 2007'] # Try starting on a different day of the week & see how it impact P&L? - cal = 'WKD' + # Fill data for every workday and use weekend calendar (note: this is + # a bit of a fudge, filling down) + # CHECK DATA isn"t missing at start of series + df = df.resample("B").last().fillna(method="ffill") + df = df[ + df.index >= "09 Mar 2007"] # Try starting on a different day of + # the week & see how it impact P&L? + cal = "WKD" - # Remove New Year's Day and Christmas - # df = Filter().filter_time_series_by_holidays(df, cal='FX') + # Remove New Year"s Day and Christmas + # df = Filter().filter_time_series_by_holidays(df, cal="FX") # We want to roll a short 1W option at expiry - # If we select longer dates, it will mark to market the price through the month by interpolating between eg. 1W and 1M + # If we select longer dates, it will mark to market the price through + # the month by interpolating between eg. 
1W and 1M # (and using whole vol curve at each tenor) - fx_options_curve = FXOptionsCurve(fx_options_trading_tenor=fx_options_trading_tenor, + fx_options_curve = FXOptionsCurve( + fx_options_trading_tenor=fx_options_trading_tenor, roll_days_before=0, - roll_event='expiry-date', - roll_months=1, # This is ignored if we roll on expiry date - fx_options_tenor_for_interpolation=['1W', '1M'], - strike='atm', - contract_type='european-straddle', - position_multiplier=-1.0, # +1.0 for long options, -1.0 for short options + roll_event="expiry-date", + roll_months=1, # This is ignored if we roll on expiry date + fx_options_tenor_for_interpolation=["1W", "1M"], + strike="atm", + contract_type="european-straddle", + position_multiplier=-1.0, + # +1.0 for long options, -1.0 for short options output_calculation_fields=True, freeze_implied_vol=True, cal=cal, - cum_index='mult') + cum_index="mult") - # Let's trade a short straddle, and we roll at expiry - df_cuemacro_option_straddle_tot = fx_options_curve.construct_total_return_index(cross, df, - depo_tenor_for_option=fx_options_trading_tenor) + # Let"s trade a short straddle, and we roll at expiry + df_cuemacro_option_straddle_tot = fx_options_curve.construct_total_return_index( + cross, df, + depo_tenor_for_option=fx_options_trading_tenor) - # Add transaction costs to the option index (bid/ask bp for the option premium and spot FX) + # Add transaction costs to the option index (bid/ask bp for the option + # premium and spot FX) # Have wider spread for straddle (because adding call + put) - df_cuemacro_option_straddle_tc = fx_options_curve.apply_tc_signals_to_total_return_index(cross, df_cuemacro_option_straddle_tot, - option_tc_bp=10, spot_tc_bp=2) + df_cuemacro_option_straddle_tc = fx_options_curve.apply_tc_signals_to_total_return_index( + cross, df_cuemacro_option_straddle_tot, + option_tc_bp=10, spot_tc_bp=2) # Get total returns for spot md_request.abstract_curve = None # Get Bloomberg calculated total return indices (for spot) - md_request.category = 'fx-tot' - md_request.cut = 'NYC' + md_request.category = "fx-tot" + md_request.cut = "NYC" df_bbg_tot = market.fetch_market(md_request) - df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns] + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] calculations = Calculations() df_index = calculations.create_mult_index_from_prices( - prepare_indices(cross=cross, df_option_tc=df_cuemacro_option_straddle_tc, df_spot_tot=df_bbg_tot)) + prepare_indices(cross=cross, + df_option_tc=df_cuemacro_option_straddle_tc, + df_spot_tot=df_bbg_tot)) from finmarketpy.economics.quickchart import QuickChart - QuickChart(engine='plotly').plot_chart_with_ret_stats(df=df_index, plotly_plot_mode='offline_html', scale_factor=-1.5) \ No newline at end of file + QuickChart(engine="plotly").plot_chart_with_ret_stats( + df=df_index, + plotly_plot_mode="offline_html", + scale_factor=-1.5) diff --git a/finmarketpy_examples/fx_options_pricing_examples.py b/finmarketpy_examples/fx_options_pricing_examples.py index 882c2ad..4661e68 100644 --- a/finmarketpy_examples/fx_options_pricing_examples.py +++ b/finmarketpy_examples/fx_options_pricing_examples.py @@ -1,23 +1,27 @@ -__author__ = 'saeedamen' +__author__ = "saeedamen" # # Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # """ -Shows how to use finmarketpy to price FX options (uses FinancePy underneath - it is recommended you pull the latest -version of FinancePy from GitHub). +Shows how to use finmarketpy to price FX options (uses FinancePy underneath - +it is recommended you pull the latest version of FinancePy from GitHub). -Note, you will need to have a Bloomberg terminal (with blpapi Python library) to download the FX market data in order -to plot these vol surface (FX spot, FX forwards, FX implied_vol volatility quotes and deposits) +Note, you will need to have a Bloomberg terminal (with blpapi Python library) +to download the FX market data in order to plot these vol surface (FX spot, +FX forwards, FX implied_vol volatility quotes and deposits) """ import pandas as pd @@ -36,7 +40,8 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='plotly') +chart = Chart(engine="plotly") + market = Market(market_data_generator=MarketDataGenerator()) # Choose run_example = 0 for everything @@ -48,23 +53,28 @@ # run_example = 6 - another USDJPY option # run_example = 7 - price USDBRL options -run_example = 1 +run_example = 0 -if __name__ == '__main__': +if __name__ == "__main__": - ###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. FX spot, FX forwards, FX deposits and FX vol quotes) - ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation and - ###### Then price some options over these dates eg. atm, 25d-call etc. + # Fetch market data for pricing GBPUSD FX options over Brexit vote + # (ie. FX spot, FX forwards, FX deposits and FX vol quotes) + # Construct volatility surface using FinancePy library underneath, + # using polynomial interpolation and then price some options over these + # dates eg. atm, 25d-call etc. 
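As the module docstring notes, every example below needs a Bloomberg terminal for the underlying vol-market data, so re-running them repeatedly can be slow or impossible away from the terminal. One pragmatic pattern is to persist the fetched DataFrame locally the first time and reload it afterwards. This is only an editor's sketch of that workflow; the fetch_with_cache helper and the parquet path are illustrative rather than part of finmarketpy, and it assumes the returned DataFrame serialises cleanly to parquet.

import os
import pandas as pd

def fetch_with_cache(market, md_request, cache_path):
    """Fetch market data via findatapy once, then reload it from disk."""
    if os.path.exists(cache_path):
        return pd.read_parquet(cache_path)

    df = market.fetch_market(md_request)
    df.to_parquet(cache_path)

    return df

# eg. df = fetch_with_cache(market, md_request, "market_data_gbpusd.parquet")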
if run_example == 1 or run_example == 0: - - horizon_date = '23 Jun 2016' - cross = 'GBPUSD' + horizon_date = "23 Jun 2016" + cross = "GBPUSD" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='NYC', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="NYC", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) @@ -75,219 +85,269 @@ # Price several different options print("atm 1M european call") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-call', tenor='1M').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "atm", + contract_type="european-call", + tenor="1M").to_string()) print("25d 1W european put") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), + "25d-otm", contract_type="european-put", + tenor="1W").to_string()) - # Try a broken date 12D option (note, for broken dates, currently doesn't interpolate key strikes) + # Try a broken date 12D option (note, for broken dates, currently doesn"t interpolate key strikes) # Specify expiry date instead of the tenor for broken dates print("1.50 12D european call") print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 1.50, - expiry_date=pd.Timestamp(horizon_date) + pd.Timedelta(days=12), contract_type='european-call').to_string()) - + expiry_date=pd.Timestamp( + horizon_date) + pd.Timedelta(days=12), + contract_type="european-call").to_string()) ###### Fetch market data for pricing USDJPY FX options over Brexit vote (ie. 
FX spot, FX forwards, FX deposits and FX vol quotes) ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation ###### Then price a series of 1W ATM call options if run_example == 2 or run_example == 0: + start_date = "02 Nov 2020"; + finish_date = "05 Nov 2020" + horizon_date = pd.bdate_range(start_date, finish_date, freq="B") - start_date = '02 Nov 2020'; finish_date = '05 Nov 2020' - horizon_date = pd.bdate_range(start_date, finish_date, freq='B') - - cross = 'USDJPY' + cross = "USDJPY" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date, - data_source='bloomberg', cut='NYC', category='fx-vol-market', + md_request = MarketDataRequest(start_date=start_date, + finish_date=finish_date, + data_source="bloomberg", cut="NYC", + category="fx-vol-market", tickers=cross, - cache_algo='cache_algo_return', base_depos_currencies=[cross[0:3], cross[3:6]]) + cache_algo="cache_algo_return", + base_depos_currencies=[cross[0:3], + cross[3:6]]) df = market.fetch_market(md_request) - # Skip 3W/4M because this particular close (NYC) doesn't have that in USDJPY market data - tenors = ["ON", "1W", "2W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y"] + # Skip 3W/4M because this particular close (NYC) doesn"t have that in USDJPY market data + tenors = ["ON", "1W", "2W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", + "3Y"] fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=tenors) fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface) print("atm 1W european put") - print(fx_op.price_instrument(cross, horizon_date, 'atm', contract_type='european-put', - tenor='1W', depo_tenor='1W').to_string()) + print(fx_op.price_instrument(cross, horizon_date, "atm", + contract_type="european-put", + tenor="1W", depo_tenor="1W").to_string()) print("25d 3M european call") - print(fx_op.price_instrument(cross, horizon_date, '25d-otm', contract_type='european-call', - tenor='3M', depo_tenor='3M').to_string()) + print(fx_op.price_instrument(cross, horizon_date, "25d-otm", + contract_type="european-call", + tenor="3M", depo_tenor="3M").to_string()) print("10d 1M european put") - print(fx_op.price_instrument(cross, horizon_date, '10d-otm', contract_type='european-put', - tenor='1M', depo_tenor='1M').to_string()) + print(fx_op.price_instrument(cross, horizon_date, "10d-otm", + contract_type="european-put", + tenor="1M", depo_tenor="1M").to_string()) ###### Fetch market data for pricing AUDUSD options on 18 Apr 2007, just before credit crisis ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation and ###### Then price some options over these dates eg. atm, 25d-call etc. 
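For the USDJPY series priced in run_example == 2 above, it can be handy to gather the individually printed contracts into a single table. The sketch below is an editor's illustration, not part of the patch: it reuses fx_op, cross and horizon_date from that example and assumes, as the to_string() calls suggest, that price_instrument returns a DataFrame indexed by horizon date.

import pandas as pd

# Label -> the arguments used for each contract priced above
contracts = {
    "atm-1W-put": dict(strike="atm", contract_type="european-put",
                       tenor="1W", depo_tenor="1W"),
    "25d-3M-call": dict(strike="25d-otm", contract_type="european-call",
                        tenor="3M", depo_tenor="3M"),
    "10d-1M-put": dict(strike="10d-otm", contract_type="european-put",
                       tenor="1M", depo_tenor="1M"),
}

priced = {}

for label, kwargs in contracts.items():
    strike = kwargs.pop("strike")
    priced[label] = fx_op.price_instrument(cross, horizon_date, strike,
                                           **kwargs)

# One wide DataFrame with a column level per contract, for easy comparison
comparison = pd.concat(priced, axis=1)
print(comparison.to_string())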
if run_example == 3 or run_example == 0: - - horizon_date = '18 Apr 2007' - cross = 'AUDUSD' + horizon_date = "18 Apr 2007" + cross = "AUDUSD" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='LDN', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="LDN", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['ON', '1W', '1M']) + fx_vol_surface = FXVolSurface(market_df=df, asset=cross, + tenors=["ON", "1W", "1M"]) fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface) - # Try a broken date 15D option (note, for broken dates, currently doesn't interpolate key strikes) + # Try a broken date 15D option (note, for broken dates, currently doesn"t interpolate key strikes) # Specify expiry date instead of the tenor for broken dates print("atm 15D european call") print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 0.8124, - expiry_date=pd.Timestamp(horizon_date) + pd.Timedelta(days=15), contract_type='european-call').to_string()) + expiry_date=pd.Timestamp( + horizon_date) + pd.Timedelta(days=15), + contract_type="european-call").to_string()) ###### Fetch market data for pricing AUDUSD options during start of 2008 Credit Crisis if run_example == 4 or run_example == 0: - - horizon_date = '17 Aug 2007' - cross = 'AUDUSD' + horizon_date = "17 Aug 2007" + cross = "AUDUSD" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='BGN', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="BGN", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W', '1M', '3M']) + fx_vol_surface = FXVolSurface(market_df=df, asset=cross, + tenors=["1W", "1M", "3M"]) fx_vol_surface.build_vol_surface(pd.Timestamp(horizon_date)) fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface) # Price several different options - # Try a broken date 15D option (note, for broken dates, currently doesn't interpolate key strikes) + # Try a broken date 15D option (note, for broken dates, currently doesn"t interpolate key strikes) # Specify expiry date instead of the tenor for broken dates print("atm 15D european call") print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 0.8535, - expiry_date=pd.Timestamp('05 Sep 2007'), contract_type='european-call').to_string()) + expiry_date=pd.Timestamp("05 Sep 2007"), + contract_type="european-call").to_string()) ###### Fetch market data for pricing EURUSD options during start of 2006 if run_example == 5 or run_example == 0: - - horizon_date = '04 Jan 2006' - cross = 'EURUSD' + horizon_date = "04 Jan 2006" + cross = "EURUSD" # Download the whole all market data 
for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='BGN', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="BGN", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W', '1M', '3M']) + fx_vol_surface = FXVolSurface(market_df=df, asset=cross, + tenors=["1W", "1M", "3M"]) fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface) # Price several different options - # Try a broken date 15D option (note, for broken dates, currently doesn't interpolate key strikes) + # Try a broken date 15D option (note, for broken dates, currently doesn"t interpolate key strikes) # Specify expiry date instead of the tenor for broken dates print("atm 1W european call") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', - tenor="1W", depo_tenor='1W', contract_type='european-call').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "atm", + tenor="1W", depo_tenor="1W", + contract_type="european-call").to_string()) ###### Fetch market data for pricing USDJPY ATM 1W if run_example == 6 or run_example == 0: - - horizon_date = '30 March 2007' - cross = 'USDJPY' + horizon_date = "30 March 2007" + cross = "USDJPY" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='LDN', category='fx-vol-market', - fx_vol_tenor=['1W'], - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="LDN", + category="fx-vol-market", + fx_vol_tenor=["1W"], + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=['1W'], solver='nelmer-mead-numba') + fx_vol_surface = FXVolSurface(market_df=df, asset=cross, tenors=["1W"], + solver="nelmer-mead-numba") fx_op = FXOptionsPricer(fx_vol_surface=fx_vol_surface) market_df = fx_vol_surface.get_all_market_data() # Print 1W data - print(market_df[[x for x in market_df.columns if '1W' in x]][market_df.index == horizon_date].to_string()) + print(market_df[[x for x in market_df.columns if "1W" in x]][ + market_df.index == horizon_date].to_string()) # Print ATM vol fx_vol_surface.build_vol_surface(horizon_date) fx_vol_surface.extract_vol_surface(num_strike_intervals=None) - print("ATM vol " + str(fx_vol_surface.get_atm_vol(tenor='1W'))) + print("ATM vol " + str(fx_vol_surface.get_atm_vol(tenor="1W"))) # Specify expiry date instead of the tenor for broken dates print("atm 1W european straddle") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', - tenor="1W", depo_tenor='1W', contract_type='european-straddle').to_string()) - + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "atm", + tenor="1W", depo_tenor="1W", + contract_type="european-straddle").to_string()) ###### Price USDBRL option around 2018 2nd 
round of presidential election if run_example == 7 or run_example == 0: - - horizon_date = '26 Oct 2018' - cross = 'USDBRL' - non_usd = 'BRL' + horizon_date = "26 Oct 2018" + cross = "USDBRL" + non_usd = "BRL" # Download the whole all market data for USDBRL for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='NYC', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="NYC", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) # Compute implied deposit BRL 1M from USDBRL forwards (and USD 1M depo) fx_forwards_price = FXForwardsPricer() - implied_depo_df = fx_forwards_price.calculate_implied_depo(cross, non_usd, market_df=df, - fx_forwards_tenor=['1W', '1M'], - depo_tenor=['1W', '1M']) - - implied_depo_df.columns = [x.replace('-implied-depo', '') for x in implied_depo_df.columns] - df = df.join(implied_depo_df, how='left') - - # USDBRL quoted ATMF implied vol (as opposed to delta neutral) hence 'fwd' parameter - fx_op = FXOptionsPricer(fx_vol_surface=FXVolSurface(market_df=df, asset=cross, atm_method='fwd', depo_tenor='1M')) + implied_depo_df = fx_forwards_price.calculate_implied_depo(cross, + non_usd, + market_df=df, + fx_forwards_tenor=[ + "1W", + "1M"], + depo_tenor=[ + "1W", + "1M"]) + + implied_depo_df.columns = [x.replace("-implied-depo", "") for x in + implied_depo_df.columns] + df = df.join(implied_depo_df, how="left") + + # USDBRL quoted ATMF implied vol (as opposed to delta neutral) hence "fwd" parameter + fx_op = FXOptionsPricer( + fx_vol_surface=FXVolSurface(market_df=df, asset=cross, + atm_method="fwd", depo_tenor="1M")) # Price several different options print(df) print("atm 1M european put") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-put', tenor='1M').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "atm", + contract_type="european-put", + tenor="1M").to_string()) # TODO: calendar around election results in slightly different pricing - # print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string()) - # print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 3.5724, contract_type='european-put', expiry_date=pd.Timestamp('2 Nov 2018')).to_string()) + # print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "25d-otm", contract_type="european-put", tenor="1W").to_string()) + # print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 3.5724, contract_type="european-put", expiry_date=pd.Timestamp("2 Nov 2018")).to_string()) ###### Price GBPUSD option around Brexit with unquoted deltas if run_example == 8 or run_example == 0: - - horizon_date = '23 Jun 2016' - cross = 'GBPUSD' + horizon_date = "23 Jun 2016" + cross = "GBPUSD" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='NYC', category='fx-vol-market', - tickers=cross, base_depos_currencies=[cross[0:3], cross[3:6]], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + 
data_source="bloomberg", cut="NYC", + category="fx-vol-market", + tickers=cross, + base_depos_currencies=[cross[0:3], + cross[3:6]], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) @@ -297,7 +357,11 @@ # Price several different options print("atm 1M european call") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), 'atm', contract_type='european-call', tenor='1M').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), "atm", + contract_type="european-call", + tenor="1M").to_string()) print("25d 1W european put") - print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), '25d-otm', contract_type='european-put', tenor='1W').to_string()) + print(fx_op.price_instrument(cross, pd.Timestamp(horizon_date), + "25d-otm", contract_type="european-put", + tenor="1W").to_string()) diff --git a/finmarketpy_examples/fx_spot_indices_examples.py b/finmarketpy_examples/fx_spot_indices_examples.py index bf4f4c5..4e00bf0 100644 --- a/finmarketpy_examples/fx_spot_indices_examples.py +++ b/finmarketpy_examples/fx_spot_indices_examples.py @@ -1,19 +1,23 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # """ -Shows how to use finmarketpy to create total return indices for FX spot (ie. calculates spot returns + carry returns) +Shows how to use finmarketpy to create total return indices for FX spot (ie. 
+calculates spot returns + carry returns) """ import pandas as pd @@ -29,114 +33,139 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='plotly') +chart = Chart(engine="plotly") market = Market(market_data_generator=MarketDataGenerator()) calculations = Calculations() # Choose run_example = 0 for everything -# run_example = 1 - create daily total return indices from FX spot data + deposit for AUDJPY, and compare -# run_example = 2 - create intraday total return indices from FX spot data + deposit for GBPUSD, and compare with daily +# run_example = 1 - create daily total return indices from FX spot data + +# deposit for AUDJPY, and compare +# run_example = 2 - create intraday total return indices from FX spot data + +# deposit for GBPUSD, and compare with daily run_example = 0 from finmarketpy.curve.fxspotcurve import FXSpotCurve -###### Create total return indices plot for AUDJPY (from perspective of a USD investor) -###### Compare with AUDJPY FX spot and BBG constructed AUDJPY total return indices +# Create total return indices plot for AUDJPY (from perspective of a USD investor) +# Compare with AUDJPY FX spot and BBG constructed AUDJPY total return indices if run_example == 1 or run_example == 0: - - # Get AUDJPY total returns from perspective of USD investor (via AUDUSD & JPYUSD and AUD, USD & JPY overnight deposit rates) - md_request = MarketDataRequest(start_date='01 Jan 1999', finish_date='01 Dec 2020', - data_source='bloomberg', cut='NYC', category='fx', - tickers=['AUDJPY'], - cache_algo='cache_algo_return', - abstract_curve=FXSpotCurve(construct_via_currency='USD', depo_tenor='ON')) + # Get AUDJPY total returns from perspective of USD investor (via AUDUSD & + # JPYUSD and AUD, USD & JPY overnight deposit rates) + md_request = MarketDataRequest(start_date="01 Jan 1999", + finish_date="01 Dec 2020", + data_source="bloomberg", cut="NYC", + category="fx", + tickers=["AUDJPY"], + cache_algo="cache_algo_return", + abstract_curve=FXSpotCurve( + construct_via_currency="USD", + depo_tenor="ON")) df_tot = market.fetch_market(md_request=md_request) - df_tot.columns = [x + '-tot-cuemacro' for x in df_tot.columns] + df_tot.columns = [x + "-tot-cuemacro" for x in df_tot.columns] # Get spot data md_request.abstract_curve = None df_spot = market.fetch_market(md_request=md_request) - df_spot.columns = [x + '-spot' for x in df_spot.columns] + df_spot.columns = [x + "-spot" for x in df_spot.columns] # Get Bloomberg calculated total return indices (for spot) - md_request.category = 'fx-tot' + md_request.category = "fx-tot" df_bbg_tot = market.fetch_market(md_request) - df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns] + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] # Get Bloomberg calculated total return indices (for 1M forwards rolled) - md_request.category = 'fx-tot-forwards' + md_request.category = "fx-tot-forwards" df_bbg_tot_forwards = market.fetch_market(md_request) - df_bbg_tot_forwards.columns = [x + '-bbg' for x in df_bbg_tot_forwards.columns] - - # Combine into a single data frame and plot, we note that the Cuemacro constructed indices track the Bloomberg - # indices relatively well (both from spot and 1M forwards). 
Also note the large difference with spot indices - # CAREFUL to fill down, before reindexing because 1M forwards indices are likely to have different publishing dates - df = calculations.join([df_tot, df_bbg_tot, df_spot, df_bbg_tot_forwards], how='outer').fillna(method='ffill') + df_bbg_tot_forwards.columns = [x + "-bbg" for x in + df_bbg_tot_forwards.columns] + + # Combine into a single data frame and plot, we note that the Cuemacro + # constructed indices track the Bloomberg indices relatively well (both + # from spot and 1M forwards). Also note the large difference with spot indices + # CAREFUL to fill down, before reindexing because 1M forwards indices are + # likely to have different publishing dates + df = calculations.join([df_tot, df_bbg_tot, df_spot, df_bbg_tot_forwards], + how="outer").fillna(method="ffill") df = calculations.create_mult_index_from_prices(df) chart.plot(df) -###### Create total return indices plot for GBPUSD with intraday and daily data (from perspective of a USD investor) -###### Compare intraday and daily total return indices +# Create total return indices plot for GBPUSD with intraday and daily data +# (from perspective of a USD investor) +# Compare intraday and daily total return indices if run_example == 2 or run_example == 0: - import pytz # Get GBPUSD total returns from perspective of USD investor (via GBP and USD rates) - md_request = MarketDataRequest(start_date='01 Jan 2019', finish_date='01 Jul 2019', - data_source='bloomberg', cut='NYC', category='fx', - tickers=['GBPUSD'], - cache_algo='cache_algo_return', - abstract_curve=FXSpotCurve(construct_via_currency='USD', depo_tenor='ON')) + md_request = MarketDataRequest(start_date="01 Jan 2019", + finish_date="01 Jul 2019", + data_source="bloomberg", cut="NYC", + category="fx", + tickers=["GBPUSD"], + cache_algo="cache_algo_return", + abstract_curve=FXSpotCurve( + construct_via_currency="USD", + depo_tenor="ON")) df_tot = market.fetch_market(md_request=md_request) - df_tot.columns = [x + '-tot-cuemacro' for x in df_tot.columns] + df_tot.columns = [x + "-tot-cuemacro" for x in df_tot.columns] df_tot = df_tot.tz_localize(pytz.utc) - df_tot.index = df_tot.index + pd.Timedelta(hours=22) # Roughly NY close 2200 GMT + df_tot.index = df_tot.index + pd.Timedelta( + hours=22) # Roughly NY close 2200 GMT md_request.abstract_curve = None # Get intraday spot data - md_request.freq = 'tick' - md_request.data_source = 'dukascopy' + md_request.freq = "tick" + md_request.data_source = "dukascopy" df_intraday_spot = market.fetch_market(md_request=md_request) - df_intraday_spot = pd.DataFrame(df_intraday_spot.resample('1min').last().dropna()) + df_intraday_spot = pd.DataFrame( + df_intraday_spot.resample("1min").last().dropna()) # Get Bloomberg calculated total return indices (for spot) - md_request.category = 'fx-tot' - md_request.freq = 'daily' - md_request.data_source = 'bloomberg' + md_request.category = "fx-tot" + md_request.freq = "daily" + md_request.data_source = "bloomberg" df_bbg_tot = market.fetch_market(md_request) - df_bbg_tot.columns = [x + '-bbg' for x in df_bbg_tot.columns] + df_bbg_tot.columns = [x + "-bbg" for x in df_bbg_tot.columns] df_bbg_tot = df_bbg_tot.tz_localize(pytz.utc) - df_bbg_tot.index = df_bbg_tot.index + pd.Timedelta(hours=22) # Roughly NY close 2200 GMT + df_bbg_tot.index = df_bbg_tot.index + pd.Timedelta( + hours=22) # Roughly NY close 2200 GMT - md_request = MarketDataRequest(start_date='01 Jan 2019', finish_date='01 Jul 2019', - data_source='bloomberg', cut='NYC', category='base-depos', 
- tickers=['GBPON', 'USDON'], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date="01 Jan 2019", + finish_date="01 Jul 2019", + data_source="bloomberg", cut="NYC", + category="base-depos", + tickers=["GBPON", "USDON"], + cache_algo="cache_algo_return") # Join daily deposit data with intraday spot data - # OK to fill down, because deposit data isn't very volatile + # OK to fill down, because deposit data isn"t very volatile df_deposit_rates = market.fetch_market(md_request).tz_localize(pytz.utc) - df_intraday_market = df_intraday_spot.join(df_deposit_rates, how='left') - df_intraday_market = df_intraday_market.fillna(method='ffill').fillna(method='bfill') + df_intraday_market = df_intraday_spot.join(df_deposit_rates, how="left") + df_intraday_market = df_intraday_market.fillna(method="ffill").fillna( + method="bfill") - df_intraday_tot = FXSpotCurve().construct_total_return_index('GBPUSD', df_intraday_market, depo_tenor='ON') + df_intraday_tot = FXSpotCurve().construct_total_return_index( + "GBPUSD", df_intraday_market, depo_tenor="ON") - df_intraday_spot.columns = [x + '-intraday-spot' for x in df_intraday_spot.columns] - df_intraday_tot.columns = [x + '-intraday-tot' for x in df_intraday_spot.columns] + df_intraday_spot.columns = [x + "-intraday-spot" for x in + df_intraday_spot.columns] + df_intraday_tot.columns = [x + "-intraday-tot" for x in + df_intraday_spot.columns] # Combine into a single data frame and plot - df = calculations.join([df_bbg_tot, df_tot, df_intraday_tot, df_intraday_spot], how='outer').fillna(method='ffill') + df = calculations.join( + [df_bbg_tot, df_tot, df_intraday_tot, df_intraday_spot], + how="outer").fillna(method="ffill") df = calculations.create_mult_index_from_prices(df) - chart.plot(df) \ No newline at end of file + chart.plot(df) diff --git a/finmarketpy_examples/fx_vol_surface_animation.py b/finmarketpy_examples/fx_vol_surface_animation.py index 34a2691..a1c3f05 100644 --- a/finmarketpy_examples/fx_vol_surface_animation.py +++ b/finmarketpy_examples/fx_vol_surface_animation.py @@ -1,36 +1,45 @@ -__author__ = 'saeedamen' +__author__ = "saeedamen" # -# Copyright 2016 Cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # """ -Shows how to load up FX vol surfaces from Bloomberg and then plot an animation of them. Note, this does not do +Shows how to load up FX vol surfaces from Bloomberg and then plot an animation +of them. Note, this does not do any interpolation. 
""" from findatapy.market import Market, MarketDataRequest, MarketDataGenerator, FXVolFactory from chartpy import Chart, Style +try: + from finaddpy.market import CachedMarketDataGenerator as MarketDataGenerator +except: + pass + def plot_animated_vol_market(): market = Market(market_data_generator=MarketDataGenerator()) - cross = ['EURUSD']; start_date = '01 Mar 2017'; finish_date = '21 Apr 2017'; sampling = 'no' + cross = ["EURUSD"]; start_date = "01 Mar 2017"; finish_date = "21 Apr 2017"; sampling = "no" md_request = MarketDataRequest(start_date=start_date, finish_date=finish_date, - data_source='bloomberg', cut='NYC', category='fx-implied-vol', - tickers=cross, cache_algo='cache_algo_return') + data_source="bloomberg", cut="NYC", category="fx-implied-vol", + tickers=cross, cache_algo="cache_algo_return") df = market.fetch_market(md_request) - if sampling != 'no': df = df.resample(sampling).mean() + if sampling != "no": df = df.resample(sampling).mean() fxvf = FXVolFactory() df_vs = [] @@ -38,19 +47,19 @@ def plot_animated_vol_market(): for i in range(0, len(df.index)): df_vs.append(fxvf.extract_vol_surface_for_date(df, cross[0], i)) # Do static plot for first day using Plotly - style = Style(title="FX vol surface of " + cross[0], source="chartpy", color='Blues') + style = Style(title="FX vol surface of " + cross[0], source="chartpy", color="Blues") - Chart(df=df_vs[0], chart_type='surface', style=style).plot(engine='plotly') + Chart(df=df_vs[0], chart_type="surface", style=style).plot(engine="plotly") # Now do animation (TODO: need to fix animation in chartpy for matplotlib) - style = Style(title="FX vol surface of " + cross[0], source="chartpy", color='Blues', + style = Style(title="FX vol surface of " + cross[0], source="chartpy", color="Blues", animate_figure=True, animate_titles=df.index, animate_frame_ms=500, normalize_colormap=False) - Chart(df=df_vs, chart_type='surface', style=style).plot(engine='plotly') + Chart(df=df_vs, chart_type="surface", style=style).plot(engine="plotly") # Chart object is initialised with the dataframe and our chart style - Chart(df=df_vs, chart_type='surface', style=style).plot(engine='plotly') + Chart(df=df_vs, chart_type="surface", style=style).plot(engine="plotly") -if __name__ == '__main__': +if __name__ == "__main__": plot_animated_vol_market() \ No newline at end of file diff --git a/finmarketpy_examples/fx_vol_surface_interpolation_examples.py b/finmarketpy_examples/fx_vol_surface_interpolation_examples.py index 750f5d3..1025377 100644 --- a/finmarketpy_examples/fx_vol_surface_interpolation_examples.py +++ b/finmarketpy_examples/fx_vol_surface_interpolation_examples.py @@ -1,4 +1,4 @@ -__author__ = 'saeedamen' +__author__ = "saeedamen" # # Copyright 2020 Cuemacro @@ -37,7 +37,7 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='plotly') +chart = Chart(engine="plotly") market = Market(market_data_generator=MarketDataGenerator()) # Choose run_example = 0 for everything @@ -47,97 +47,106 @@ # run_example = 4 - get implied vol for a particular strike, interpolating the surface # run_example = 5 - get USDJPY vol surface around US presidential election and plot -run_example = 3 +run_example = 0 ###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. 
FX spot, FX forwards, FX deposits and FX vol quotes) ###### Show how to plot ATM 1M implied_vol vol time series if run_example == 1 or run_example == 0: - # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date='01 May 2016', finish_date='01 Aug 2016', - data_source='bloomberg', cut='LDN', category='fx-vol-market', - tickers=['GBPUSD'], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date="01 May 2016", + finish_date="01 Aug 2016", + data_source="bloomberg", cut="LDN", + category="fx-vol-market", + tickers=["GBPUSD"], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) style = Style() - style.title = 'GBPUSD 1M Implied Vol' + style.title = "GBPUSD 1M Implied Vol" style.scale_factor = 3 - style.source = 'Bloomberg' + style.source = "Bloomberg" - chart.plot(df['GBPUSDV1M.close'], style=style) + chart.plot(df["GBPUSDV1M.close"], style=style) ###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. FX spot, FX forwards, FX deposits and FX vol quotes) ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation if run_example == 2 or run_example == 0: - - horizon_date = '23 Jun 2016' - cross = 'GBPUSD' + horizon_date = "23 Jun 2016" + cross = "GBPUSD" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='LDN', category='fx-vol-market', + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="LDN", + category="fx-vol-market", tickers=cross, - cache_algo='cache_algo_return') + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, vol_function_type='BBG', asset=cross) + fx_vol_surface = FXVolSurface(market_df=df, vol_function_type="BBG", + asset=cross) fx_vol_surface.build_vol_surface(horizon_date) # Note for unstable vol surface dates (eg. over Brexit date) you may need to increase tolerance in FinancePy - # FinFXVolSurface.buildVolSurface method to get it to fill, or choose different vol_function_type (eg. 'CLARK5') - df_vol_dict = fx_vol_surface.extract_vol_surface(low_K_pc=0.80, high_K_pc=1.1) + # FinFXVolSurface.buildVolSurface method to get it to fill, or choose different vol_function_type (eg. 
"CLARK5") + df_vol_dict = fx_vol_surface.extract_vol_surface(low_K_pc=0.80, + high_K_pc=1.1) # Print out the various vol surface and data produced - print(df_vol_dict['vol_surface_implied_pdf']) - print(df_vol_dict['vol_surface_strike_space']) - print(df_vol_dict['vol_surface_delta_space']) - print(df_vol_dict['vol_surface_delta_space_exc_ms']) - print(df_vol_dict['deltas_vs_strikes']) - print(df_vol_dict['vol_surface_quoted_points']) + print(df_vol_dict["vol_surface_implied_pdf"]) + print(df_vol_dict["vol_surface_strike_space"]) + print(df_vol_dict["vol_surface_delta_space"]) + print(df_vol_dict["vol_surface_delta_space_exc_ms"]) + print(df_vol_dict["deltas_vs_strikes"]) + print(df_vol_dict["vol_surface_quoted_points"]) # Plot vol surface in strike space (all interpolated) # x_axis = strike - index # y_axis = tenor - columns # z_axis = implied vol - values - chart.plot(df_vol_dict['vol_surface_strike_space'].iloc[:, ::-1], chart_type='surface', - style=Style(title='Plotting volatility in strike space')) + chart.plot(df_vol_dict["vol_surface_strike_space"].iloc[:, ::-1], + chart_type="surface", + style=Style(title="Plotting volatility in strike space")) # Plot vol surface in delta space (exc market strangle strikes) - chart.plot(df_vol_dict['vol_surface_delta_space_exc_ms'].iloc[:, ::-1], - chart_type='surface', style=Style(title='Plotting in delta space')) + chart.plot(df_vol_dict["vol_surface_delta_space_exc_ms"].iloc[:, ::-1], + chart_type="surface", + style=Style(title="Plotting in delta space")) # Plot implied PDF in strike space (all interpolated) # x_axis = strike - index # y_axis = tenor - columns # z_axis = implied PDF - values - chart.plot(df_vol_dict['vol_surface_implied_pdf'], chart_type='surface', - style=Style(title='Plotting implied PDF in strike space')) + chart.plot(df_vol_dict["vol_surface_implied_pdf"], chart_type="surface", + style=Style(title="Plotting implied PDF in strike space")) # Plot the implied PDF for ON only versus strikes - chart.plot(df_vol_dict['vol_surface_implied_pdf']['ON'], chart_type='line', - style=Style(title='Plotting implied PDF in strike space ON around Brexit', x_axis_range=[1.0,1.8])) - + chart.plot(df_vol_dict["vol_surface_implied_pdf"]["ON"], chart_type="line", + style=Style( + title="Plotting implied PDF in strike space ON around Brexit", + x_axis_range=[1.0, 1.8])) ###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. 
FX spot, FX forwards, FX deposits and FX vol quotes) ###### Do animation for vol surface if run_example == 3 or run_example == 0: # Download the whole all market data for GBPUSD for pricing options (vol surface) # Using LDN close data (CMPL) - md_request = MarketDataRequest(start_date='01 Jun 2016', finish_date='30 Jul 2016', - data_source='bloomberg', cut='LDN', category='fx-vol-market', - tickers=['GBPUSD'], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date="01 Jun 2016", + finish_date="30 Jul 2016", + data_source="bloomberg", cut="LDN", + category="fx-vol-market", + tickers=["GBPUSD"], + cache_algo="cache_algo_return") # 01 Jun 2016, 30 Jun 2016 # 20 Jun 2016, 24 Jun 2016 df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset='GBPUSD') + fx_vol_surface = FXVolSurface(market_df=df, asset="GBPUSD") animate_titles = [] @@ -146,10 +155,11 @@ # Note this does take a few minutes, given it's fitting the vol surface for every date # TODO explore speeding up using Numba or similar - vol_surface_dict, extremes_dict = fx_vol_surface.extract_vol_surface_across_dates(df.index, - vol_surface_type='vol_surface_strike_space') + vol_surface_dict, extremes_dict = fx_vol_surface.extract_vol_surface_across_dates( + df.index, + vol_surface_type="vol_surface_strike_space") - animate_titles = [x.strftime('%d %b %Y') for x in vol_surface_dict.keys()] + animate_titles = [x.strftime("%d %b %Y") for x in vol_surface_dict.keys()] print(extremes_dict) @@ -157,35 +167,41 @@ # x_axis = strike - index # y_axis = tenor - columns # z_axis = implied_vol vol - values - style = Style(title='Plotting in strike space', animate_figure=True, animate_titles=animate_titles) + style = Style(title="Plotting in strike space", animate_figure=True, + animate_titles=animate_titles) - chart.plot(list(vol_surface_dict.values()), chart_type='surface', style=style) + chart.plot(list(vol_surface_dict.values()), chart_type="surface", + style=style) ###### Fetch market data for pricing GBPUSD FX options over Brexit vote (ie. 
FX spot, FX forwards, FX deposits and FX vol quotes) ###### Get implied_vol vol for specific strikes interpolating across surface if run_example == 4 or run_example == 0: # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date='20 Jun 2016', finish_date='25 Jun 2016', - data_source='bloomberg', cut='LDN', category='fx-vol-market', - tickers=['GBPUSD'], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date="20 Jun 2016", + finish_date="25 Jun 2016", + data_source="bloomberg", cut="LDN", + category="fx-vol-market", + tickers=["GBPUSD"], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - fx_vol_surface = FXVolSurface(market_df=df, asset='GBPUSD') + fx_vol_surface = FXVolSurface(market_df=df, asset="GBPUSD") df_vol_surface_strike_space_list = [] animate_titles = [] - fx_vol_surface.build_vol_surface('20 Jun 2016') + fx_vol_surface.build_vol_surface("20 Jun 2016") # Get the implied_vol volatility for a specific strike (GBPUSD=1.4000 in the 1W tenor) for 20 Jun 2016 - vol_at_strike = fx_vol_surface.calculate_vol_for_strike_expiry(1.4000, tenor='1W') + vol_at_strike = fx_vol_surface.calculate_vol_for_strike_expiry(1.4000, + tenor="1W") - fx_vol_surface.build_vol_surface('23 Jun 2016') + fx_vol_surface.build_vol_surface("23 Jun 2016") # Get the implied_vol volatility for a specific strike (GBPUSD=1.4000 in the 1W tenor) for 23 Jun 2016 - vol_at_strike = fx_vol_surface.calculate_vol_for_strike_expiry(1.4000, tenor='1W') + vol_at_strike = fx_vol_surface.calculate_vol_for_strike_expiry(1.4000, + tenor="1W") print(vol_at_strike) @@ -193,20 +209,21 @@ ###### (ie. FX spot, FX forwards, FX deposits and FX vol quotes) ###### Construct volatility surface using FinancePy library underneath, using polynomial interpolation if run_example == 5 or run_example == 0: - - horizon_date = '03 Nov 2020' + horizon_date = "03 Nov 2020" # Download the whole all market data for GBPUSD for pricing options (vol surface) - md_request = MarketDataRequest(start_date=horizon_date, finish_date=horizon_date, - data_source='bloomberg', cut='NYC', category='fx-vol-market', - tickers=['USDJPY'], - cache_algo='cache_algo_return') + md_request = MarketDataRequest(start_date=horizon_date, + finish_date=horizon_date, + data_source="bloomberg", cut="NYC", + category="fx-vol-market", + tickers=["USDJPY"], + cache_algo="cache_algo_return") df = market.fetch_market(md_request) - # Skip 3W/4M because this particular close (NYC) doesn't have that in USDJPY market data + # Skip 3W/4M because this particular close (NYC) doesn"t have that in USDJPY market data tenors = ["ON", "1W", "2W", "1M", "2M", "3M", "6M", "9M", "1Y", "2Y", "3Y"] - fx_vol_surface = FXVolSurface(market_df=df, tenors=tenors, asset='USDJPY') + fx_vol_surface = FXVolSurface(market_df=df, tenors=tenors, asset="USDJPY") fx_vol_surface.build_vol_surface(horizon_date) @@ -218,9 +235,11 @@ # x_axis = strike - index # y_axis = tenor - columns # z_axis = implied vol - values - chart.plot(df_vol_dict['vol_surface_strike_space'].iloc[:, ::-1], chart_type='surface', - style=Style(title='Plotting volatility in strike space')) + chart.plot(df_vol_dict["vol_surface_strike_space"].iloc[:, ::-1], + chart_type="surface", + style=Style(title="Plotting volatility in strike space")) # Plot vol surface in delta space (exc market strangle strikes) - chart.plot(df_vol_dict['vol_surface_delta_space_exc_ms'].iloc[:, ::-1], - chart_type='surface', style=Style(title='Plotting in delta 
space')) \ No newline at end of file + chart.plot(df_vol_dict["vol_surface_delta_space_exc_ms"].iloc[:, ::-1], + chart_type="surface", + style=Style(title="Plotting in delta space")) diff --git a/finmarketpy_examples/quandl_examples.py b/finmarketpy_examples/quandl_examples.py index 15b9246..299d679 100644 --- a/finmarketpy_examples/quandl_examples.py +++ b/finmarketpy_examples/quandl_examples.py @@ -1,15 +1,18 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. # @@ -28,7 +31,7 @@ logger = LoggerManager().getLogger(__name__) -chart = Chart(engine='matplotlib') +chart = Chart(engine="matplotlib") market = Market(market_data_generator=MarketDataGenerator()) @@ -39,26 +42,28 @@ ###### fetch data from Quandl for BoE rate (using Bloomberg data) if run_example == 1 or run_example == 0: - # Monthly average of UK resident monetary financial institutions' (excl. Central Bank) sterling - # Weighted average interest rate, other loans, new advances, on a fixed rate to private non-financial corporations (in percent) + # Monthly average of UK resident monetary financial institutions" + # (excl. 
Central Bank) sterling + # Weighted average interest rate, other loans, new advances, on a + # fixed rate to private non-financial corporations (in percent) # not seasonally adjusted md_request = MarketDataRequest( start_date="01 Jan 2000", # start date - data_source='quandl', # use Quandl as data source - tickers=['Weighted interest rate'], - fields=['close'], # which fields to download - vendor_tickers=['BOE/CFMBJ84'], # ticker (Bloomberg) - vendor_fields=['close'], # which Bloomberg fields to download - cache_algo='internet_load_return', - quandl_api_key='TYPE YOUR KEY HERE') # how to return data + data_source="quandl", # use Quandl as data source + tickers=["Weighted interest rate"], + fields=["close"], # which fields to download + vendor_tickers=["BOE/CFMBJ84"], # ticker (Bloomberg) + vendor_fields=["close"], # which Bloomberg fields to download + cache_algo="internet_load_return", + quandl_api_key="TYPE YOUR KEY HERE") # how to return data df = market.fetch_market(md_request) style = Style() - style.title = 'BoE weighted interest rate' + style.title = "BoE weighted interest rate" style.scale_factor = 3 style.file_output = "boe-rate.png" - style.source = 'Quandl/BoE' + style.source = "Quandl/BoE" chart.plot(df, style=style) \ No newline at end of file diff --git a/finmarketpy_examples/returns_examples.py b/finmarketpy_examples/returns_examples.py index 1cb0fa5..f02bd44 100644 --- a/finmarketpy_examples/returns_examples.py +++ b/finmarketpy_examples/returns_examples.py @@ -1,18 +1,20 @@ -__author__ = 'saeedamen' # Saeed Amen +__author__ = "saeedamen" # -# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro +# Copyright 2020 Cuemacro # -# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the -# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # -# See the License for the specific language governing permissions and limitations under the License. +# See the License for the specific language governing permissions and +# limitations under the License. 
 #
-
 """
 Shows how to calculate returns of an asset
 """
@@ -32,26 +34,26 @@ calc = Calculations()
 logger = LoggerManager().getLogger(__name__)

-chart = Chart(engine='matplotlib')
+chart = Chart(engine="matplotlib")

 market = Market(market_data_generator=MarketDataGenerator())

 # Choose run_example = 0 for everything
-# run_example = 1 - use PyFolio to analyse gold's return properties
+# run_example = 1 - use PyFolio to analyse gold's return properties

 run_example = 0

-###### Use PyFolio to analyse gold's return properties
+###### Use PyFolio to analyse gold's return properties
 if run_example == 1 or run_example == 0:
     md_request = MarketDataRequest(
-        start_date = "01 Jan 1996",  # start date
-        data_source = 'bloomberg',  # use Bloomberg as data source
-        tickers = ['Gold'],
-        fields = ['close'],  # which fields to download
-        vendor_tickers = ['XAUUSD Curncy'],  # ticker (Bloomberg)
-        vendor_fields = ['PX_LAST'],  # which Bloomberg fields to download
-        cache_algo = 'internet_load_return')  # how to return data
+        start_date="01 Jan 1996",  # start date
+        data_source="bloomberg",  # use Bloomberg as data source
+        tickers=["Gold"],
+        fields=["close"],  # which fields to download
+        vendor_tickers=["XAUUSD Curncy"],  # ticker (Bloomberg)
+        vendor_fields=["PX_LAST"],  # which Bloomberg fields to download
+        cache_algo="internet_load_return")  # how to return data

     df = market.fetch_market(md_request)

-    ta.run_strategy_returns_stats(None, index=df, engine='pyfolio')
\ No newline at end of file
+    ta.run_strategy_returns_stats(None, index=df, engine="pyfolio")
\ No newline at end of file
diff --git a/finmarketpy_examples/seasonality_examples.py b/finmarketpy_examples/seasonality_examples.py
index 7ef5858..6387972 100644
--- a/finmarketpy_examples/seasonality_examples.py
+++ b/finmarketpy_examples/seasonality_examples.py
@@ -1,15 +1,18 @@
-__author__ = 'saeedamen' # Saeed Amen
+__author__ = "saeedamen"

 #
-# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
+# Copyright 2020 Cuemacro
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
-# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #
-# See the License for the specific language governing permissions and limitations under the License.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 
 """
@@ -33,7 +36,7 @@
 calc = Calculations()
 logger = LoggerManager().getLogger(__name__)
 
-chart = Chart(engine='matplotlib')
+chart = Chart(engine="matplotlib")
 
 market = Market(market_data_generator=MarketDataGenerator())
 
@@ -50,12 +53,12 @@
 if run_example == 1 or run_example == 0:
     md_request = MarketDataRequest(
         start_date = "01 Jan 1996",  # start date
-        data_source = 'bloomberg',  # use Bloomberg as data source
-        tickers = ['Gold'],
-        fields = ['close'],  # which fields to download
-        vendor_tickers = ['XAUUSD Curncy'],  # ticker (Bloomberg)
-        vendor_fields = ['PX_LAST'],  # which Bloomberg fields to download
-        cache_algo = 'internet_load_return')  # how to return data
+        data_source = "bloomberg",  # use Bloomberg as data source
+        tickers = ["Gold"],
+        fields = ["close"],  # which fields to download
+        vendor_tickers = ["XAUUSD Curncy"],  # ticker (Bloomberg)
+        vendor_fields = ["PX_LAST"],  # which Bloomberg fields to download
+        cache_algo = "internet_load_return")  # how to return data
 
     df = market.fetch_market(md_request)
 
@@ -65,8 +68,8 @@
     day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
 
     style = Style()
-    style.date_formatter = '%b'
-    style.title = 'Gold seasonality'
+    style.date_formatter = "%b"
+    style.title = "Gold seasonality"
     style.scale_factor = 3
    style.file_output = "gold-seasonality.png"
 
@@ -74,16 +77,16 @@
 
 ###### Calculate seasonal moves in FX vol (using Bloomberg data)
 if run_example == 2 or run_example == 0:
-    tickers = ['EURUSDV1M', 'USDJPYV1M', 'GBPUSDV1M', 'AUDUSDV1M']
+    tickers = ["EURUSDV1M", "USDJPYV1M", "GBPUSDV1M", "AUDUSDV1M"]
 
     md_request = MarketDataRequest(
         start_date = "01 Jan 1996",  # start date
-        data_source = 'bloomberg',  # use Bloomberg as data source
+        data_source = "bloomberg",  # use Bloomberg as data source
         tickers = tickers,
-        fields = ['close'],  # which fields to download
-        vendor_tickers = [x + ' Curncy' for x in tickers],  # ticker (Bloomberg)
-        vendor_fields = ['PX_LAST'],  # which Bloomberg fields to download
-        cache_algo = 'internet_load_return')  # how to return data
+        fields = ["close"],  # which fields to download
+        vendor_tickers = [x + " Curncy" for x in tickers],  # ticker (Bloomberg)
+        vendor_fields = ["PX_LAST"],  # which Bloomberg fields to download
+        cache_algo = "internet_load_return")  # how to return data
 
     df = market.fetch_market(md_request)
 
@@ -93,11 +96,11 @@
     day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
 
     style = Style()
-    style.date_formatter = '%b'
-    style.title = 'FX vol seasonality'
+    style.date_formatter = "%b"
+    style.title = "FX vol seasonality"
     style.scale_factor = 3
     style.file_output = "fx-vol-seasonality.png"
-    style.source = 'finmarketpy/Bloomberg'
+    style.source = "finmarketpy/Bloomberg"
 
     chart.plot(day_of_month_seasonality, style=style)
 
@@ -105,12 +108,12 @@
 if run_example == 3 or run_example == 0:
     md_request = MarketDataRequest(
         start_date = "01 Jan 1996",  # start date
-        data_source = 'bloomberg',  # use Bloomberg as data source
-        tickers = ['Gasoline'],
-        fields = ['close'],  # which fields to download
-        vendor_tickers = ['XB1 Comdty'],  # ticker (Bloomberg)
-        vendor_fields = ['PX_LAST'],  # which Bloomberg fields to download
-        cache_algo = 'internet_load_return')  # how to return data
+        data_source = "bloomberg",  # use Bloomberg as data source
+        tickers = ["Gasoline"],
+        fields = ["close"],  # which fields to download
+        vendor_tickers = ["XB1 Comdty"],  # ticker (Bloomberg)
+        vendor_fields = ["PX_LAST"],  # which Bloomberg fields to download
+        cache_algo = "internet_load_return")  # how to return data
 
     df = market.fetch_market(md_request)
 
@@ -120,8 +123,8 @@
     day_of_month_seasonality = calc.convert_month_day_to_date_time(day_of_month_seasonality)
 
     style = Style()
-    style.date_formatter = '%b'
-    style.title = 'Gasoline seasonality'
+    style.date_formatter = "%b"
+    style.title = "Gasoline seasonality"
     style.scale_factor = 3
     style.file_output = "gasoline-seasonality.png"
 
@@ -132,11 +135,11 @@
     # get the NFP NSA from ALFRED/FRED
     md_request = MarketDataRequest(
         start_date="01 Jun 2000",  # start date (download data over past decade)
-        data_source='alfred',  # use ALFRED/FRED as data source
-        tickers=['US NFP'],  # ticker
-        fields=['actual-release'],  # which fields to download
-        vendor_tickers=['PAYNSA'],  # ticker (FRED) PAYEMS (NSA)
-        vendor_fields=['actual-release'])  # which FRED fields to download
+        data_source="alfred",  # use ALFRED/FRED as data source
+        tickers=["US NFP"],  # ticker
+        fields=["actual-release"],  # which fields to download
+        vendor_tickers=["PAYNSA"],  # ticker (FRED) PAYEMS (NSA)
+        vendor_fields=["actual-release"])  # which FRED fields to download
 
     df = market.fetch_market(md_request)
 
@@ -145,8 +148,8 @@
     month_seasonality = seasonality.monthly_seasonality_from_prices(df)
 
     style = Style()
-    style.date_formatter = '%b'
-    style.title = 'NFP seasonality'
+    style.date_formatter = "%b"
+    style.title = "NFP seasonality"
     style.scale_factor = 3
     style.file_output = "nfp-seasonality.png"
 
@@ -157,29 +160,29 @@
     # get the NFP NSA from ALFRED/FRED
     md_request = MarketDataRequest(
         start_date="01 Jun 1980",  # start date (download data over past decade)
-        data_source='alfred',  # use ALFRED/FRED as data source
-        tickers=['US NFP (NSA)', 'US NFP (SA)'],  # ticker
-        fields=['actual-release'],  # which fields to download
-        vendor_tickers=['PAYNSA', 'PAYEMS'],  # ticker (FRED) PAYEMS (SA) PAYNSA (NSA)
-        vendor_fields=['actual-release'])  # which FRED fields to download
+        data_source="alfred",  # use ALFRED/FRED as data source
+        tickers=["US NFP (NSA)", "US NFP (SA)"],  # ticker
+        fields=["actual-release"],  # which fields to download
+        vendor_tickers=["PAYNSA", "PAYEMS"],  # ticker (FRED) PAYEMS (SA) PAYNSA (NSA)
+        vendor_fields=["actual-release"])  # which FRED fields to download
 
     df = market.fetch_market(md_request)
 
     # Calculate changes in NFP
     df = df - df.shift(1)
 
-    df_seasonal_adjusted = seasonality.adjust_rolling_seasonality(pandas.DataFrame(df['US NFP (NSA).actual-release']),
+    df_seasonal_adjusted = seasonality.adjust_rolling_seasonality(pandas.DataFrame(df["US NFP (NSA).actual-release"]),
                                                                   window=12*20, likely_period=12)
 
-    df_seasonal_adjusted.columns = [x + ' SA finmarketpy' for x in df_seasonal_adjusted.columns]
+    df_seasonal_adjusted.columns = [x + " SA finmarketpy" for x in df_seasonal_adjusted.columns]
 
     # Compare not seasonally adjusted vs seasonally adjusted
     df = df.join(df_seasonal_adjusted)
-    df = df[df.index > '01 Jan 2000']
+    df = df[df.index > "01 Jan 2000"]
 
     style = Style()
-    style.title = 'NFP (seasonally adjusted)'
+    style.title = "NFP (seasonally adjusted)"
     style.scale_factor = 3
     style.file_output = "nfp-seasonally-adjusted.png"
 
diff --git a/finmarketpy_examples/technicals_example.py b/finmarketpy_examples/technicals_example.py
index 28af805..892bc5b 100644
--- a/finmarketpy_examples/technicals_example.py
+++ b/finmarketpy_examples/technicals_example.py
@@ -1,15 +1,18 @@
-__author__ = 'saeedamen'  # Saeed Amen
+__author__ = "saeedamen"  # Saeed Amen
 
 #
-# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
+# Copyright 2020 Cuemacro
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
-# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #
-# See the License for the specific language governing permissions and limitations under the License.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 
@@ -24,7 +27,7 @@
 logger = LoggerManager().getLogger(__name__)
 
-chart = Chart(engine='matplotlib')
+chart = Chart(engine="matplotlib")
 
 market = Market(market_data_generator=MarketDataGenerator())
 
 tech_ind = TechIndicator()
@@ -40,12 +43,12 @@
     # Downloaded S&P500
     md_request = MarketDataRequest(
         start_date = "01 Jan 2000",  # start date
-        data_source = 'quandl',  # use Quandl as data source
-        tickers = ['S&P500'],
-        fields = ['close', 'open', 'high', 'low'],  # which fields to download
-        vendor_tickers = ['YAHOO/INDEX_GSPC'],  # ticker (Bloomberg)
-        vendor_fields = ['close', 'open', 'high', 'low'],  # which Bloomberg fields to download
-        cache_algo = 'internet_load_return')  # how to return data
+        data_source = "quandl",  # use Quandl as data source
+        tickers = ["S&P500"],
+        fields = ["close", "open", "high", "low"],  # which fields to download
+        vendor_tickers = ["YAHOO/INDEX_GSPC"],  # ticker (Quandl)
+        vendor_fields = ["close", "open", "high", "low"],  # which Quandl fields to download
+        cache_algo = "internet_load_return")  # how to return data
 
     df = market.fetch_market(md_request)
 
@@ -53,15 +56,15 @@
 
     tech_params = TechParams()
     tech_params.atr_period = 14
 
-    tech_ind.create_tech_ind(df, 'ATR', tech_params)
+    tech_ind.create_tech_ind(df, "ATR", tech_params)
 
     style = Style()
-    style.title = 'S&P500 ATR'
+    style.title = "S&P500 ATR"
     style.scale_factor = 2
     style.file_output = "sp500.png"
-    style.source = 'Quandl/Yahoo'
+    style.source = "Quandl/Yahoo"
 
     df = tech_ind.get_techind()
 
-    chart.plot(df, style=style)
\ No newline at end of file
+    chart.plot(df, style=style)
diff --git a/finmarketpy_examples/tradingmodelfxtrend_bbg_example.py b/finmarketpy_examples/tradingmodelfxtrend_bbg_example.py
index d84a67c..9a269c7 100644
--- a/finmarketpy_examples/tradingmodelfxtrend_bbg_example.py
+++ b/finmarketpy_examples/tradingmodelfxtrend_bbg_example.py
@@ -1,15 +1,18 @@
-__author__ = 'saeedamen'  # Saeed Amen
+__author__ = "saeedamen"  # Saeed Amen
 
 #
-# Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
+# Copyright 2020 Cuemacro
 #
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the
-# License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at http://www.apache.org/licenses/LICENSE-2.0
 #
-# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 #
-# See the License for the specific language governing permissions and limitations under the License.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 #
 
 import datetime
 
@@ -19,7 +22,8 @@
 from finmarketpy.economics import TechIndicator
 
 class TradingModelFXTrend_BBG_Example(TradingModel):
-    """Shows how to create a simple FX CTA style strategy, using the TradingModel abstract class (backtest_examples.py
+    """Shows how to create a simple FX CTA style strategy, using the
+    TradingModel abstract class (backtest_examples.py
     is a lower level way of doing this). Uses BBG total returns data.
     """
 
@@ -28,10 +32,10 @@ def __init__(self):
         ##### FILL IN WITH YOUR OWN PARAMETERS FOR display, dumping, TSF etc.
         self.market = Market(market_data_generator=MarketDataGenerator())
-        self.DUMP_PATH = ''
-        self.FINAL_STRATEGY = 'FX trend'
+        self.DUMP_PATH = ""
+        self.FINAL_STRATEGY = "FX trend"
         self.SCALE_FACTOR = 1
-        self.DEFAULT_PLOT_ENGINE = 'matplotlib'
+        self.DEFAULT_PLOT_ENGINE = "matplotlib"
 
         self.br = self.load_parameters()
 
         return
 
@@ -61,7 +65,7 @@ def load_parameters(self, br = None):
         br.signal_vol_max_leverage = 5
         br.signal_vol_periods = 20
         br.signal_vol_obs_in_year = 252
-        br.signal_vol_rebalance_freq = 'BM'
+        br.signal_vol_rebalance_freq = "BM"
         br.signal_vol_resample_freq = None
 
         # Have vol target for portfolio
@@ -70,7 +74,7 @@ def load_parameters(self, br = None):
         br.portfolio_vol_max_leverage = 5
         br.portfolio_vol_periods = 20
         br.portfolio_vol_obs_in_year = 252
-        br.portfolio_vol_rebalance_freq = 'BM'
+        br.portfolio_vol_rebalance_freq = "BM"
         br.portfolio_vol_resample_freq = None
 
         # Tech params
@@ -84,32 +88,32 @@ def load_assets(self, br = None):
         logger = LoggerManager().getLogger(__name__)
 
         # For FX basket
-        full_bkt = ['EURUSD', 'USDJPY', 'GBPUSD', 'AUDUSD', 'USDCAD',
-                    'NZDUSD', 'USDCHF', 'USDNOK', 'USDSEK']
+        full_bkt = ["EURUSD", "USDJPY", "GBPUSD", "AUDUSD", "USDCAD",
+                    "NZDUSD", "USDCHF", "USDNOK", "USDSEK"]
 
         basket_dict = {}
 
         for i in range(0, len(full_bkt)):
             basket_dict[full_bkt[i]] = [full_bkt[i]]
 
-        basket_dict['FX trend'] = full_bkt
+        basket_dict["FX trend"] = full_bkt
 
         br = self.load_parameters(br = br)
 
         logger.info("Loading asset data...")
 
-        vendor_tickers = [x + 'CR CMPN Curncy' for x in full_bkt]
+        vendor_tickers = [x + "CR CMPN Curncy" for x in full_bkt]
 
         market_data_request = MarketDataRequest(
-            start_date = br.start_date,  # start date
-            finish_date = br.finish_date,  # finish date
-            freq = 'daily',  # daily data
-            data_source = 'bloomberg',  # use Quandl as data source
-            tickers = full_bkt,  # ticker (Cuemacro)
-            fields = ['close'],  # which fields to download
-            vendor_tickers = vendor_tickers,  # ticker (Quandl)
-            vendor_fields = ['PX_LAST'],  # which Bloomberg fields to download
-            cache_algo = 'internet_load_return')  # how to return data
+            start_date=br.start_date,  # start date
+            finish_date=br.finish_date,  # finish date
+            freq="daily",  # daily data
+            data_source="bloomberg",  # use Bloomberg as data source
+            tickers=full_bkt,  # ticker (Cuemacro)
+            fields=["close"],  # which fields to download
+            vendor_tickers=vendor_tickers,  # ticker (Bloomberg)
+            vendor_fields=["PX_LAST"],  # which Bloomberg fields to download
+            cache_algo="internet_load_return")  # how to return data
 
         asset_df = self.market.fetch_market(market_data_request)
 
@@ -117,8 +121,9 @@ def load_assets(self, br = None):
         if asset_df is None:
             import pandas
 
-            asset_df = pandas.read_csv("d:/fxcta.csv", index_col=0, parse_dates=['Date'],
-                                       date_parser = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d'))
+            asset_df = pandas.read_csv(
+                "fxcta.csv", index_col=0, parse_dates=["Date"],
+                date_parser=lambda x: pandas.datetime.strptime(x, "%Y-%m-%d"))
 
         # Signalling variables
         spot_df = asset_df
@@ -133,9 +138,11 @@ def construct_signal(self, spot_df, spot_df2, tech_params, br, run_in_parallel=F
         ##### FILL IN WITH YOUR OWN SIGNALS
 
         # Use technical indicator to create signals
-        # (we could obviously create whatever function we wanted for generating the signal dataframe)
+        # (we could obviously create whatever function we wanted for generating
+        # the signal dataframe)
         tech_ind = TechIndicator()
-        tech_ind.create_tech_ind(spot_df, 'SMA', tech_params); signal_df = tech_ind.get_signal()
+        tech_ind.create_tech_ind(spot_df, "SMA", tech_params)
+        signal_df = tech_ind.get_signal()
 
         return signal_df
 
@@ -144,15 +151,15 @@ def construct_strategy_benchmark(self):
         ###### FILL IN WITH YOUR OWN BENCHMARK
 
         tsr_indices = MarketDataRequest(
-            start_date = self.br.start_date,  # start date
-            finish_date = self.br.finish_date,  # finish date
-            freq = 'daily',  # intraday data
-            data_source = 'bloomberg',  # use Bloomberg as data source
-            tickers = ["EURUSD"],  # tickers to download
-            vendor_tickers=['EURUSDCR CMPN Curncy'],
-            fields = ['close'],  # which fields to download
-            vendor_fields = ['PX_LAST'],
-            cache_algo = 'internet_load_return')  # how to return data
+            start_date=self.br.start_date,  # start date
+            finish_date=self.br.finish_date,  # finish date
+            freq="daily",  # daily data
+            data_source="bloomberg",  # use Bloomberg as data source
+            tickers=["EURUSD"],  # tickers to download
+            vendor_tickers=["EURUSDCR CMPN Curncy"],
+            fields=["close"],  # which fields to download
+            vendor_fields=["PX_LAST"],
+            cache_algo="internet_load_return")  # how to return data
 
         df = self.market.fetch_market(tsr_indices)
 
@@ -160,7 +167,7 @@ def construct_strategy_benchmark(self):
 
         return df
 
-if __name__ == '__main__':
+if __name__ == "__main__":
 
     # Just change "False" to "True" to run any of the below examples
 
@@ -182,8 +189,8 @@ def construct_strategy_benchmark(self):
         ta = TradeAnalysis()
 
         # Create statistics for the model returns using both finmarketpy and pyfolio
-        ta.run_strategy_returns_stats(model, engine='finmarketpy')
-        # ta.run_strategy_returns_stats(model, engine='pyfolio')
+        ta.run_strategy_returns_stats(model, engine="finmarketpy")
+        # ta.run_strategy_returns_stats(model, engine="pyfolio")
 
         # model.plot_strategy_group_benchmark_annualised_pnl()
 
@@ -195,20 +202,20 @@ def construct_strategy_benchmark(self):
         from finmarketpy.backtest import TradeAnalysis
 
         ta = TradeAnalysis()
-        ta.run_strategy_returns_stats(model, engine='finmarketpy')
+        ta.run_strategy_returns_stats(model, engine="finmarketpy")
 
         # Which backtesting parameters to change
         # names of the portfolio
         # broad type of parameter name
         parameter_list = [
-            {'portfolio_vol_adjust': True, 'signal_vol_adjust' : True},
-            {'portfolio_vol_adjust': False, 'signal_vol_adjust' : False}]
+            {"portfolio_vol_adjust": True, "signal_vol_adjust" : True},
+            {"portfolio_vol_adjust": False, "signal_vol_adjust" : False}]
 
         pretty_portfolio_names = \
-            ['Vol target',
-             'No vol target']
+            ["Vol target",
+             "No vol target"]
 
-        parameter_type = 'vol target'
+        parameter_type = "vol target"
 
         ta.run_arbitrary_sensitivity(strategy,
                                      parameter_list=parameter_list,
diff --git a/finmarketpy_examples/tradingmodelfxtrend_example.py b/finmarketpy_examples/tradingmodelfxtrend_example.py
index d95d31f..38fc007 100644
--- a/finmarketpy_examples/tradingmodelfxtrend_example.py
+++ b/finmarketpy_examples/tradingmodelfxtrend_example.py
@@ -1,4 +1,4 @@
-__author__ = 'saeedamen'  # Saeed Amen
+__author__ = "saeedamen"  # Saeed Amen
 
 #
 # Copyright 2016-2020 Cuemacro - https://www.cuemacro.com / @cuemacro
@@ -21,6 +21,11 @@
 from chartpy import Style
 
+from findatapy.util.dataconstants import DataConstants
+
+# You will likely need to change this!
+quandl_api_key = DataConstants().quandl_api_key
+
 class TradingModelFXTrend_Example(TradingModel):
     """Shows how to create a simple FX CTA style strategy, using the TradingModel abstract class (backtest_examples.py
     is a lower level way of doing this).
 
@@ -31,17 +36,17 @@ def __init__(self):
         ##### FILL IN WITH YOUR OWN PARAMETERS FOR display, dumping, TSF etc.
         self.market = Market(market_data_generator=MarketDataGenerator())
-        self.DUMP_PATH = ''
-        self.FINAL_STRATEGY = 'FX trend'
+        self.DUMP_PATH = ""
+        self.FINAL_STRATEGY = "FX trend"
         self.SCALE_FACTOR = 1
-        self.DEFAULT_PLOT_ENGINE = 'matplotlib'
-        # self.CHART_STYLE = Style(plotly_plot_mode='offline_jupyter')
+        self.DEFAULT_PLOT_ENGINE = "matplotlib"
+        # self.CHART_STYLE = Style(plotly_plot_mode="offline_jupyter")
 
         self.br = self.load_parameters()
 
         return
 
     ###### Parameters and signal generations (need to be customised for every model)
-    def load_parameters(self, br = None):
+    def load_parameters(self, br=None):
 
         if br is not None: return br
 
@@ -66,7 +71,7 @@ def load_parameters(self, br = None):
         br.signal_vol_max_leverage = 5
         br.signal_vol_periods = 20
         br.signal_vol_obs_in_year = 252
-        br.signal_vol_rebalance_freq = 'BM'
+        br.signal_vol_rebalance_freq = "BM"
         br.signal_vol_resample_freq = None
 
         # Have vol target for portfolio
@@ -75,14 +80,14 @@ def load_parameters(self, br = None):
         br.portfolio_vol_max_leverage = 5
         br.portfolio_vol_periods = 20
         br.portfolio_vol_obs_in_year = 252
-        br.portfolio_vol_rebalance_freq = 'BM'
+        br.portfolio_vol_rebalance_freq = "BM"
         br.portfolio_vol_resample_freq = None
 
         # Tech params
         br.tech_params.sma_period = 200
 
         # To make additive indices
-        # br.cum_index = 'add'
+        # br.cum_index = "add"
 
         return br
 
@@ -92,33 +97,37 @@ def load_assets(self, br = None):
         logger = LoggerManager().getLogger(__name__)
 
         # For FX basket
-        full_bkt = ['EURUSD', 'USDJPY', 'GBPUSD', 'AUDUSD', 'USDCAD',
-                    'NZDUSD', 'USDCHF', 'USDNOK', 'USDSEK']
+        full_bkt = ["EURUSD", "USDJPY", "GBPUSD", "AUDUSD", "USDCAD",
+                    "NZDUSD", "USDCHF", "USDNOK", "USDSEK"]
 
         basket_dict = {}
 
         for i in range(0, len(full_bkt)):
             basket_dict[full_bkt[i]] = [full_bkt[i]]
 
-        basket_dict['FX trend'] = full_bkt
+        basket_dict["FX trend"] = full_bkt
 
         br = self.load_parameters(br = br)
 
         logger.info("Loading asset data...")
 
-        vendor_tickers = ['FRED/DEXUSEU', 'FRED/DEXJPUS', 'FRED/DEXUSUK', 'FRED/DEXUSAL', 'FRED/DEXCAUS',
-                          'FRED/DEXUSNZ', 'FRED/DEXSZUS', 'FRED/DEXNOUS', 'FRED/DEXSDUS']
+        vendor_tickers = ["FRED/DEXUSEU", "FRED/DEXJPUS",
+                          "FRED/DEXUSUK", "FRED/DEXUSAL",
+                          "FRED/DEXCAUS",
+                          "FRED/DEXUSNZ", "FRED/DEXSZUS",
+                          "FRED/DEXNOUS", "FRED/DEXSDUS"]
 
         market_data_request = MarketDataRequest(
-            start_date = br.start_date,  # start date
-            finish_date = br.finish_date,  # finish date
-            freq = 'daily',  # daily data
-            data_source = 'quandl',  # use Quandl as data source
-            tickers = full_bkt,  # ticker (Thalesians)
-            fields = ['close'],  # which fields to download
-            vendor_tickers = vendor_tickers,  # ticker (Quandl)
-            vendor_fields = ['close'],  # which Bloomberg fields to download
-            cache_algo = 'cache_algo_return')  # how to return data
+            start_date=br.start_date,  # start date
+            finish_date=br.finish_date,  # finish date
+            freq="daily",  # daily data
+            data_source="quandl",  # use Quandl as data source
+            tickers=full_bkt,  # ticker (Cuemacro)
+            fields=["close"],  # which fields to download
+            vendor_tickers=vendor_tickers,  # ticker (Quandl)
+            vendor_fields=["close"],  # which Quandl fields to download
+            cache_algo="cache_algo_return",  # how to return data
+            quandl_api_key=quandl_api_key)
 
         asset_df = self.market.fetch_market(market_data_request)
 
@@ -126,8 +135,9 @@ def load_assets(self, br = None):
         if asset_df is None:
             import pandas
 
-            asset_df = pandas.read_csv("d:/fxcta.csv", index_col=0, parse_dates=['Date'],
-                                       date_parser = lambda x: pandas.datetime.strptime(x, '%Y-%m-%d'))
+            asset_df = pandas.read_csv(
+                "fxcta.csv", index_col=0, parse_dates=["Date"],
+                date_parser=lambda x: pandas.datetime.strptime(x, "%Y-%m-%d"))
 
         # Signalling variables
         spot_df = asset_df
@@ -144,7 +154,7 @@ def construct_signal(self, spot_df, spot_df2, tech_params, br, run_in_parallel=F
         # Use technical indicator to create signals
         # (we could obviously create whatever function we wanted for generating the signal dataframe)
         tech_ind = TechIndicator()
-        tech_ind.create_tech_ind(spot_df, 'SMA', tech_params);
+        tech_ind.create_tech_ind(spot_df, "SMA", tech_params)
         signal_df = tech_ind.get_signal()
 
         return signal_df
 
@@ -154,15 +164,16 @@ def construct_strategy_benchmark(self):
         ###### FILL IN WITH YOUR OWN BENCHMARK
 
         tsr_indices = MarketDataRequest(
-            start_date = self.br.start_date,  # start date
-            finish_date = self.br.finish_date,  # finish date
-            freq = 'daily',  # intraday data
-            data_source = 'quandl',  # use Bloomberg as data source
-            tickers = ["EURUSD"],  # tickers to download
-            vendor_tickers=['FRED/DEXUSEU'],
-            fields = ['close'],  # which fields to download
-            vendor_fields = ['close'],
-            cache_algo = 'cache_algo_return')  # how to return data)
+            start_date=self.br.start_date,  # start date
+            finish_date=self.br.finish_date,  # finish date
+            freq="daily",  # daily data
+            data_source="quandl",  # use Quandl as data source
+            tickers=["EURUSD"],  # tickers to download
+            vendor_tickers=["FRED/DEXUSEU"],
+            fields=["close"],  # which fields to download
+            vendor_fields=["close"],
+            cache_algo="cache_algo_return",  # how to return data
+            quandl_api_key=quandl_api_key)
 
         df = self.market.fetch_market(tsr_indices)
 
@@ -170,7 +181,7 @@ def construct_strategy_benchmark(self):
 
         return df
 
-if __name__ == '__main__':
+if __name__ == "__main__":
 
     # Just change "False" to "True" to run any of the below examples
 
@@ -192,8 +203,8 @@ def construct_strategy_benchmark(self):
         ta = TradeAnalysis()
 
         # Create statistics for the model returns using both finmarketpy and pyfolio
-        ta.run_strategy_returns_stats(model, engine='finmarketpy')
-        # ta.run_strategy_returns_stats(model, engine='pyfolio')
+        ta.run_strategy_returns_stats(model, engine="finmarketpy")
+        # ta.run_strategy_returns_stats(model, engine="pyfolio")
 
         # model.plot_strategy_group_benchmark_annualised_pnl()
 
@@ -205,20 +216,20 @@ def construct_strategy_benchmark(self):
         from finmarketpy.backtest import TradeAnalysis
 
         ta = TradeAnalysis()
-        ta.run_strategy_returns_stats(model, engine='finmarketpy')
+        ta.run_strategy_returns_stats(model, engine="finmarketpy")
 
         # Which backtesting parameters to change
         # names of the portfolio
         # broad type of parameter name
         parameter_list = [
-            {'portfolio_vol_adjust': True, 'signal_vol_adjust' : True},
-            {'portfolio_vol_adjust': False, 'signal_vol_adjust' : False}]
+            {"portfolio_vol_adjust": True, "signal_vol_adjust" : True},
+            {"portfolio_vol_adjust": False, "signal_vol_adjust" : False}]
 
         pretty_portfolio_names = \
-            ['Vol target',
-             'No vol target']
+            ["Vol target",
+             "No vol target"]
 
-        parameter_type = 'vol target'
+        parameter_type = "vol target"
 
         ta.run_arbitrary_sensitivity(strategy,
                                      parameter_list=parameter_list,
diff --git a/setup.py b/setup.py
index 368a62f..d61e4eb 100644
--- a/setup.py
+++ b/setup.py
@@ -1,28 +1,30 @@
 from setuptools import setup, find_packages
 
-long_description = """finmarketpy is a Python based library that enables you to analyze market data and also to backtest
-trading strategies using a simple to use API, which has prebuilt templates for you to define backtest."""
+long_description = """finmarketpy is a Python based library that enables you to
+analyze market data and also to backtest trading strategies using a simple to
+use API, which has prebuilt templates for you to define backtests."""
 
-setup(name='finmarketpy',
-      version='0.11.12',
-      description='finmarketpy is a Python based library for backtesting trading strategies',
-      author='Saeed Amen',
-      author_email='saeed@cuemacro.com',
-      license='Apache 2.0',
+setup(name="finmarketpy",
+      version="0.11.13",
+      description="finmarketpy is a Python based library for backtesting trading strategies",
+      author="Saeed Amen",
+      author_email="saeed@cuemacro.com",
+      license="Apache 2.0",
       long_description=long_description,
-      keywords=['trading', 'markets', 'currencies', 'pandas', 'data', 'Bloomberg', 'tick', 'stocks', 'equities'],
-      url='https://github.com/cuemacro/finmarketpy',
+      keywords=["trading", "markets", "currencies", "pandas", "data",
+                "Bloomberg", "tick", "stocks", "equities"],
+      url="https://github.com/cuemacro/finmarketpy",
       packages=find_packages(),
       include_package_data=True,
-      install_requires=['pandas',
-                        'twython',
-                        'pytz',
-                        'requests',
-                        'numpy',
-                        'multiprocess',
-                        'seasonal',
-                        'scikit-learn',
-                        'matplotlib',
-                        'numba',
-                        'financepy==0.220'],
+      install_requires=["pandas",
+                        "twython",
+                        "pytz",
+                        "requests",
+                        "numpy",
+                        "multiprocess",
+                        "seasonal",
+                        "scikit-learn",
+                        "matplotlib",
+                        "numba",
+                        "financepy==0.310"],
       zip_safe=False)
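
After applying the patch, a quick smoke test of the upgrade can look roughly like the sketch below. This is not part of the patch: it assumes financepy 0.310 is installed, the repository root is on the Python path so that finmarketpy_examples is importable, and a Quandl API key has been configured for findatapy, so adjust names and paths to your own environment.

# Hypothetical post-upgrade smoke test -- not part of the patch.
# Assumes: financepy 0.310 installed, repo root on sys.path, Quandl API key
# configured in findatapy's DataConstants (as tradingmodelfxtrend_example.py expects).
from importlib.metadata import version

from finmarketpy.backtest import TradeAnalysis
from finmarketpy_examples.tradingmodelfxtrend_example import TradingModelFXTrend_Example

# setup.py now pins financepy==0.310, so check that is what is installed
assert version("financepy") == "0.310"

# Build and run the FX trend example end-to-end
model = TradingModelFXTrend_Example()
model.construct_strategy()

# Produce the same return statistics the example's __main__ block uses
ta = TradeAnalysis()
ta.run_strategy_returns_stats(model, engine="finmarketpy")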