-
Notifications
You must be signed in to change notification settings - Fork 8
/
Copy pathtreasury_model.py
1719 lines (1521 loc) · 74.4 KB
/
treasury_model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
import warnings
import datetime as dt
import calendar
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.tseries.offsets import *
# Silence SettingWithCopyWarning: the model intentionally writes into DataFrame rows/slices.
pd.options.mode.chained_assignment = None
# Print full DataFrames when inspecting output: no column wrapping, no row truncation.
pd.set_option("display.expand_frame_repr", False)
pd.set_option("display.max_rows", None)
def normalize_date_formats(security):
    """
    Normalize the date-bearing fields of a single security row in place.

    Timestamp-like values are reduced to plain ``datetime.date`` objects; digit-led
    strings are parsed (Maturity Date) or split into a list (Interest Payable Dates).
    :param:
        security: A singular DataFrame row containing the security to be formatted
    :return:
        security: The same security row with cleaned dates
    """

    def _as_date(value):
        # Reduce a Timestamp-like value to a plain datetime.date
        return value.date()

    # Issue Date: only timestamp-like values need cleaning (strings are left alone)
    if "Issue Date" in security.index:
        raw = security["Issue Date"]
        if not (pd.isna(raw) or isinstance(raw, str)):
            security["Issue Date"] = _as_date(raw)

    # Maturity Date: timestamps reduced to dates; digit-led strings parsed to dates
    if "Maturity Date" in security.index:
        raw = security["Maturity Date"]
        if not pd.isna(raw):
            if not isinstance(raw, str):
                security["Maturity Date"] = _as_date(raw)
            elif raw[0].isdigit():
                security["Maturity Date"] = pd.to_datetime(raw).date()

    # Interest Payable Dates: timestamps reduced to dates; digit-led strings split
    # into a list of individual payment dates
    if "Interest Payable Dates" in security.index:
        raw = security["Interest Payable Dates"]
        if not pd.isna(raw):
            if not isinstance(raw, str):
                security["Interest Payable Dates"] = _as_date(raw)
            elif raw[0].isdigit():
                security["Interest Payable Dates"] = raw.split()

    return security
def read_mspd(file_path, include_bills=True):
    """
    Takes in an MSPD Excel spreadsheet and extracts outstanding debt information into a DataFrame
    :param:
        file_path: String containing path to target MSPD file (.xls)
        include_bills: Boolean identifying whether to read outstanding bills or not; defaults to True
    :return:
        full_mspd_df: DataFrame with all outstanding debt information containing normalized dates/units
        mspd_pub_date: Datetime value with MSPD publication date
    """
    full_mspd_df = pd.read_excel(
        file_path, sheet_name="Marketable", usecols="B:E, G:J, L, N, P", header=None
    )
    full_mspd_df.columns = [
        "Category",
        "CUSIP",
        "Interest Rate",
        "Yield",
        "Issue Date",
        "Maturity Date",
        "Interest Payable Dates",
        "Issued",
        "Inflation Adj",
        "Redeemed",
        "Outstanding",
    ]
    # Extract MSPD publication date from the top of the 'Marketable' tab
    # NOTE(review): relies on a fixed 54-character prefix in the header cell before the
    # date text begins — confirm against the current MSPD layout if this parse breaks.
    mspd_pub_date = pd.to_datetime(full_mspd_df["Category"][0][54:]).date()
    # Find indices of each security type in MSPD and separate into individual data frames
    debt_cats = (
        "Treasury Bills",
        "Treasury Notes",
        "Treasury Bonds",
        "Treasury Inflation-Protected Securities",
        "Treasury Floating Rate Notes",
    )
    # debt_cat_indices accumulates alternating (start, end) row pairs: a section's data
    # starts two rows below its header, and ends at the next "Total..." subtotal row.
    debt_cat_indices = []
    is_next_start = (
        True  # Boolean used to skip over subtotal and grand total rows in MSPD
    )
    for row in range(full_mspd_df.shape[0]):
        # Clean date formats
        full_mspd_df.iloc[row] = normalize_date_formats(full_mspd_df.iloc[row])
        temp_label = full_mspd_df.iat[row, 0]
        if isinstance(temp_label, str):
            if temp_label.startswith(debt_cats) and is_next_start:
                debt_cat_indices.append(row + 2)
                is_next_start = False
            elif temp_label.startswith("Total") and not is_next_start:
                debt_cat_indices.append(row)
                is_next_start = True
    # Filter full MSPD down to just relevant outstanding debt categories based on indices found above
    # Index pairs map to sections in MSPD order: [0:1] bills, [2:3] notes, [4:5] bonds,
    # [6:7] TIPS, [8:9] FRNs
    full_mspd_df = full_mspd_df.iloc[
        np.r_[
            debt_cat_indices[0] : debt_cat_indices[1],
            debt_cat_indices[2] : debt_cat_indices[3],
            debt_cat_indices[4] : debt_cat_indices[5],
            debt_cat_indices[6] : debt_cat_indices[7],
            debt_cat_indices[8] : debt_cat_indices[9],
        ]
    ].reset_index(drop=True)
    # Add category labels to each security; the offsets below translate the original
    # section boundaries into row positions within the concatenated frame (each section's
    # length is its end index minus its start index, with earlier gaps subtracted out).
    for row in range(full_mspd_df.shape[0]):
        if row < debt_cat_indices[1] - debt_cat_indices[0]:
            full_mspd_df.iloc[row, 0] = "Bills"
        elif row < debt_cat_indices[3] - debt_cat_indices[0] - (
            debt_cat_indices[2] - debt_cat_indices[1]
        ):
            full_mspd_df.iloc[row, 0] = "Notes"
        elif row < debt_cat_indices[5] - debt_cat_indices[0] - (
            debt_cat_indices[2] - debt_cat_indices[1]
        ) - (debt_cat_indices[4] - debt_cat_indices[3]):
            full_mspd_df.iloc[row, 0] = "Bonds"
        elif row < debt_cat_indices[7] - debt_cat_indices[0] - (
            debt_cat_indices[2] - debt_cat_indices[1]
        ) - (debt_cat_indices[4] - debt_cat_indices[3]) - (
            debt_cat_indices[6] - debt_cat_indices[5]
        ):
            full_mspd_df.iloc[row, 0] = "TIPS"
        else:
            full_mspd_df.iloc[row, 0] = "FRNs"
    # Based on include_bills boolean, drop bills
    if not include_bills:
        full_mspd_df = full_mspd_df[full_mspd_df["Category"] != "Bills"]
    # Standardize units to $ billions (source figures are presumably $ millions — /1000)
    full_mspd_df["Issued"] = full_mspd_df["Issued"] / 1000
    full_mspd_df["Inflation Adj"] = full_mspd_df["Inflation Adj"] / 1000
    full_mspd_df["Redeemed"] = full_mspd_df["Redeemed"] / 1000
    full_mspd_df["Outstanding"] = full_mspd_df["Outstanding"] / 1000
    # Convert all maturity dates to next business day to account for Treasury weekend/holiday settlement rules
    # (adding 0 * BDay() rolls a weekend/holiday date forward to the next business day)
    full_mspd_df["Maturity Date"] = (full_mspd_df["Maturity Date"] + 0 * BDay()).dt.date
    return full_mspd_df, mspd_pub_date
def read_soma(file_path, include_bills=True):
    """
    Takes in a SOMA holdings report and extracts outstanding SOMA holdings into a DataFrame
    :param:
        file_path: String containing path to target SOMA file (.csv)
        include_bills: Boolean identifying whether to read SOMA bill holdings or not; defaults to True
    :return:
        full_soma_df: DataFrame with all outstanding SOMA holdings (normalized dates, units, etc.)
        soma_pub_date: Single datetime value with SOMA publication date
    """
    full_soma_df = pd.read_csv(file_path, header=0, usecols=[*range(0, 13)])
    # Publication date is taken from the first cell of the first column
    soma_pub_date = pd.to_datetime(full_soma_df.iat[0, 0]).date()
    # Keep only the desired security types; bills are optional per include_bills
    wanted_types = ["NotesBonds", "FRNs", "TIPS"]
    if include_bills:
        wanted_types = ["Bills"] + wanted_types
    full_soma_df = full_soma_df[
        full_soma_df["Security Type"].isin(wanted_types)
    ].reset_index(drop=True)
    # Standardize units to $ billions (source values are in dollars)
    for money_col in ("Par Value", "Inflation Compensation"):
        full_soma_df[money_col] = full_soma_df[money_col] / 1000000000
    # CUSIPs arrive wrapped in quotation marks; strip them off
    full_soma_df["CUSIP"] = full_soma_df["CUSIP"].str.strip("'")
    return full_soma_df, soma_pub_date
def read_issuance(file_path):
    """
    Reads in issuance table from the discretionary inputs spreadsheet
    :param:
        file_path: String containing path to target assumptions file
    :return:
        full_iss_df: DataFrame containing the issuance table
    """
    # NOTE: widen the usecols range below whenever new securities are added to the issuance table
    full_iss_df = pd.read_excel(
        file_path, sheet_name="Issuance Table", usecols="B:N", skiprows=1, header=0
    )
    # Blank cells in the spreadsheet mean zero issuance
    full_iss_df.fillna(0, inplace=True)
    # Collapse the MM/YY columns into a single mid-month datetime column up front
    full_iss_df.rename(columns={"MM": "Month", "YY": "Year"}, inplace=True)
    mid_month_dates = pd.to_datetime(full_iss_df[["Year", "Month"]].assign(DAY=15))
    full_iss_df.insert(0, "Date", mid_month_dates)
    full_iss_df.drop(["Year", "Month"], axis=1, inplace=True)
    # Monthly total = sum of all coupon issuance in each month
    full_iss_df["Monthly"] = full_iss_df.iloc[:, 1:].sum(axis=1)
    return full_iss_df
def read_disc_assumptions(file_path, date_col):
    """
    Reads in all discretionary assumptions (except issuance table, which is read separately above) from Excel input
    spreadsheet, and does some basic error checking to make sure user inputs are formatted correctly.

    Fixes vs. prior version: the FY dtype sanity check now runs BEFORE the astype("int")
    cast (previously the cast raised on malformed input before the warning could fire),
    and two warning messages that pointed users at the wrong file name
    ('discretionary_inputs.xlsx') now reference 'discretionary_assumptions.xlsx'.
    :param:
        file_path: String containing path to target input file
        date_col: DataFrame column containing list of all months in model horizon
    :return:
        fy_funding_table: DataFrame of funding needs for each fiscal year over the full model duration
        qe_path: DataFrame of QE purchases by month over the full model duration
        runoff_caps: DataFrame of monthly SOMA runoff cap for each month over the full model duration
    """
    fy_funding_table = pd.read_excel(
        file_path, sheet_name="FY Funding Needs", usecols="B:C", skiprows=3, header=0
    )
    qe_path = pd.read_excel(
        file_path, sheet_name="QE Path", usecols="B:C", skiprows=3, header=0
    )
    runoff_caps = pd.read_excel(
        file_path, sheet_name="SOMA Runoff Caps", usecols="B:C", skiprows=3, header=0
    )
    # Clean formatting of DataFrames read in from Excel
    fy_funding_table.dropna(axis=0, how="all", inplace=True)
    qe_path.dropna(axis=0, how="all", inplace=True)
    runoff_caps.dropna(axis=0, how="all", inplace=True)
    # Validate the FY column dtype before casting so the user sees the helpful warning
    # even when astype("int") below would fail on malformed entries
    if not pd.api.types.is_numeric_dtype(fy_funding_table["FY"]):
        warnings.warn(
            "One or more dates in the FY Funding Need assumptions sheet has been entered incorrectly. Update "
            "this sheet in the file 'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    fy_funding_table["FY"] = fy_funding_table["FY"].astype("int")
    # Store model end date for subsequent error checking
    temp_model_end_date = date_col.iat[-1].date()
    # Error checks for FY funding needs: input file length, non-negative funding needs
    # (Sep 15 is used as the FY cutoff to match the model's mid-month date convention)
    if temp_model_end_date > dt.date(int(fy_funding_table["FY"].iat[-1]), 9, 15):
        warnings.warn(
            "Warning: Funding need assumptions have not been provided for all necessary fiscal years. Update "
            "the file 'discretionary_assumptions.xlsx' to ensure a funding need is provided for each fiscal "
            "year of the issuance table and restart the model.",
            stacklevel=10,
        )
    if (fy_funding_table["Funding Need ($ bn)"].values < 0).any():
        warnings.warn(
            "One or more FY funding needs has been entered as a negative number. Check to ensure this is "
            "intended - generally, Treasury borrowing needs should be specified using positive numbers.",
            stacklevel=10,
        )
    # Error checks for QE path: input file length, non-negative values, date formatting
    if temp_model_end_date > qe_path["Month"].iat[-1]:
        warnings.warn(
            "Warning: QE path has not been provided for all necessary months. Update these inputs in the file"
            " 'discretionary_assumptions.xlsx' to ensure each month has an associated QE path assumption and "
            "restart the model.",
            stacklevel=10,
        )
    if (qe_path["QE Purchases ($ bn)"].values < 0).any():
        warnings.warn(
            "Warning: QE purchases path cannot contain negative values. For QT, use the SOMA Runoff Caps "
            "sheet instead to parametrize the pace of balance sheet reduction. Update the QE path in the "
            "file 'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    if not pd.api.types.is_datetime64_any_dtype(qe_path["Month"]):
        warnings.warn(
            "One or more dates in the QE Path assumptions sheet has been entered incorrectly. Update "
            "this sheet in the file 'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    # All QE months must use the mid-month (15th) convention shared by the issuance table
    if not (
        qe_path["Month"].dt.day[0] == 15
        and (qe_path["Month"].dt.day == qe_path["Month"].dt.day[0]).all()
    ):
        warnings.warn(
            "One or more months in the QE path assumptions has been entered using a non mid-month date. All "
            "months should specifically be entered as the 15th of that month; update this in the file "
            "'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    # Error checks for SOMA Runoff Caps: input file length, non-negative values, date formatting
    if temp_model_end_date > runoff_caps["Month"].iat[-1]:
        warnings.warn(
            "Warning: SOMA runoff caps have not been provided for all necessary months. Update these inputs "
            "in the file 'discretionary_assumptions.xlsx' to ensure each month has an associated runoff cap "
            "and restart the model.",
            stacklevel=10,
        )
    if (runoff_caps["Runoff Cap ($ bn)"].values < 0).any():
        warnings.warn(
            "Warning: SOMA Runoff Caps cannot contain negative values. For QE, use the QE Path sheet instead "
            "to specify the pace of QE purchases. Update the runoff caps in the file "
            "'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    if not pd.api.types.is_datetime64_any_dtype(runoff_caps["Month"]):
        warnings.warn(
            "One or more dates in the SOMA Runoff Caps assumptions sheet has been entered incorrectly. "
            "Update this sheet in the file 'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    # All runoff cap months must also use the mid-month (15th) convention
    if not (
        runoff_caps["Month"].dt.day[0] == 15
        and (runoff_caps["Month"].dt.day == runoff_caps["Month"].dt.day[0]).all()
    ):
        warnings.warn(
            "One or more months in the SOMA Runoff Caps path assumptions has been entered using a non "
            "mid-month date. All months should specifically be entered as the 15th of that month; update this"
            " in the file 'discretionary_assumptions.xlsx' and restart the model.",
            stacklevel=10,
        )
    return fy_funding_table, qe_path, runoff_caps
def read_third_party_inputs(file_path, start_date, end_date, infl_rate_type="Mid"):
    """
    Read in inflation spot rates and forward curve from third party data input spreadsheet.

    Fixes vs. prior version: the monthly borrowing distribution sum check now uses a
    floating-point tolerance (np.isclose) instead of exact equality, and a missing space
    in the Forward Curve N/A warning message has been restored.
    :param:
        file_path: String containing path to target file
        start_date: Date object containing model jump off point
        end_date: Date object containing end date of model horizon
        infl_rate_type: Must be "Bid"/"Mid"/"Ask", optional parameter to select which spot rate to use; defaults to Mid
    :return:
        spot_rates_dict: Dictionary containing yearly spot rates
        forward_curve: DataFrame containing forward curve matrix
        monthly_borrowing_distribution: DataFrame containing average monthly distribution of fiscal funding
    """
    # Checks if input date matches MSPD date to prevent time-lag errors and throw warning if not
    temp_date = (
        pd.read_excel(file_path, sheet_name="Forward Curve", usecols="C", nrows=3)
        .iat[2, 0]
        .date()
    )
    if not temp_date == start_date:
        warnings.warn(
            "Warning: Input file date does not match MSPD date. Update this value in the file "
            "'market_data.xlsx' now and then restart the program.",
            stacklevel=10,
        )
    # Read each sheet of the inputted file
    forward_curve = pd.read_excel(
        file_path, sheet_name="Forward Curve", usecols="B:T", skiprows=5, nrows=12
    )
    term_premium = pd.read_excel(
        file_path, sheet_name="Term Premium", usecols="B:T", skiprows=5, nrows=12
    )
    infl_swaps_df = pd.read_excel(
        file_path, sheet_name="Inflation Swaps", usecols="B:E", skiprows=5, nrows=16
    )
    monthly_borrowing_distribution = pd.read_excel(
        file_path,
        sheet_name="Monthly Borrowing Distribution",
        usecols="B:C",
        skiprows=3,
        nrows=12,
    )
    # Error checking for market data inputs: N/A values anywhere in the sheet
    if np.concatenate(
        [
            forward_curve[col].astype("str").str.contains(r"#N/A", na=False)
            for col in forward_curve
        ]
    ).any():
        warnings.warn(
            "One or more entries in the Forward Curve input file is returning an N/A value. Update this "
            "value in the file 'market_data.xlsx' now and then restart the program.",
            stacklevel=10,
        )
    if np.concatenate(
        [
            infl_swaps_df[col].astype("str").str.contains(r"#N/A", na=False)
            for col in infl_swaps_df
        ]
    ).any():
        warnings.warn(
            "One or more entries in the Inflation Swaps input sheet is returning an N/A value. Update this"
            " value in the file 'market_data.xlsx' now and then restart the program.",
            stacklevel=10,
        )
    # Error checking for monthly borrowing distribution: shares must sum to 1.
    # Use a tolerance rather than exact equality — twelve float shares rarely
    # sum to exactly 1.0 in binary floating point.
    if not np.isclose(
        monthly_borrowing_distribution["Avg % of Fiscal Funding Need Filled"].sum(), 1.0
    ):
        warnings.warn(
            "Monthly borrowing distribution of FY funding needs does not sum to 1. This is causing funding "
            "needs to be assigned incorrectly over the course of the model. Update these values in the file "
            "'market_data.xlsx' now and then restart the program.",
            stacklevel=10,
        )
    # Add term premium to forward curve
    forward_curve.iloc[:, 1:] = forward_curve.iloc[:, 1:].add(term_premium.iloc[:, 1:])
    # Pull spot rates into a dictionary based on zero coupon inflation swaps data,
    # one entry per calendar year of the model horizon
    spot_rates_dict = {}
    temp_model_length_yrs = end_date.year - start_date.year + 1
    for row in range(temp_model_length_yrs):
        spot_rates_dict[infl_swaps_df.iloc[row]["Tenor"]] = infl_swaps_df.iloc[row][
            infl_rate_type
        ]
    return spot_rates_dict, forward_curve, monthly_borrowing_distribution
def gen_monthly_borrowing(borrowing_yearly, date_col, monthly_dist):
    """
    Apportion FY borrowing assumptions into monthly borrowing needs based on historic seasonal distribution input
    :param:
        borrowing_yearly: DataFrame of yearly borrowing amounts by fiscal year
        date_col: Column of all months in the model horizon
        monthly_dist: DataFrame containing monthly seasonal distributions of the borrowing need (as %'s)
    :return:
        borrowing_monthly_df: DataFrame of monthly borrowing need
    """
    # Re-weight the current FY's seasonal shares so the months remaining in the FY sum to 100%
    start_abbr = calendar.month_abbr[date_col[0].month]
    start_pos = monthly_dist.index[monthly_dist["Month"] == start_abbr].tolist()[0]
    current_fy_dist = monthly_dist.copy()[start_pos:].reset_index(drop=True)
    current_fy_dist.iloc[:, 1] = (
        current_fy_dist.iloc[:, 1] / current_fy_dist.iloc[:, 1].sum()
    )
    # FY cutoff uses the model's mid-month (15th) convention
    first_fy_end = dt.date(borrowing_yearly.iat[0, 0], 9, 15)
    monthly_needs = []
    for date in date_col:
        month_abbr = calendar.month_abbr[date.month]
        if date <= first_fy_end:
            # Current FY: re-weighted share applied to the first FY's total need
            share = current_fy_dist[current_fy_dist["Month"] == month_abbr].iat[0, 1]
            monthly_needs.append(borrowing_yearly.iat[0, 1] * share)
        else:
            # Later FYs: unadjusted seasonal share of whichever FY the month falls into
            for i in range(1, borrowing_yearly.shape[0]):
                fy_start = dt.date(borrowing_yearly.iat[i - 1, 0], 9, 30)
                fy_end = dt.date(borrowing_yearly.iat[i, 0], 9, 30)
                if fy_start < date <= fy_end:
                    share = monthly_dist[monthly_dist["Month"] == month_abbr].iat[0, 1]
                    monthly_needs.append(borrowing_yearly.iat[i, 1] * share)
    return pd.DataFrame(monthly_needs, columns=["Funding Need"])
def gen_fwd_inflation_rates(inflation_spot_rates):
    """
    Compute Ny1Y forward rates for inflation implied by a given set of spot rates; used to scale TIPS each period
    :param:
        inflation_spot_rates: Dictionary of spot rates over the full model horizon
    :return:
        monthly_adj: Array of total inflation change from model jump-off point up to that given month
        monthly_deltas: Array of specific monthly inflation rates (e.g. monthly deltas of the monthly_adj array)
    """
    rates = pd.DataFrame.from_dict(
        inflation_spot_rates, orient="index", columns=["Spot"]
    )
    rates["Spot"] = rates["Spot"].div(100)  # Inputs arrive in percent
    rates["Ny1y Fwd"] = 0.00
    monthly_adj = []
    monthly_deltas = []

    def _append_year(annual_rate):
        # Expand one annualized rate into 12 compounding monthly factors
        monthly_factor = (1 + annual_rate) ** (1 / 12)
        for _ in range(12):
            running = monthly_adj[-1] * monthly_factor if monthly_adj else monthly_factor
            monthly_adj.append(running)
            monthly_deltas.append(monthly_factor)

    # Year one comes straight from the 1y spot rate
    _append_year(rates.iat[0, 0])
    # Subsequent years use Ny1y forwards bootstrapped from adjacent spot rates
    for i in range(rates.shape[0] - 1):
        rates.iat[i, 1] = (i + 2) * rates.iat[i + 1, 0] - (i + 1) * rates.iat[i, 0]
        _append_year(rates.iat[i, 1])
    return monthly_adj, monthly_deltas
def gen_monthly_coupons(forward_curve, gross_issuance):
    """
    Takes in a matrix representing the forward curve and returns a DataFrame containing implied monthly coupons for
    each future security to be generated implied from the issuance table.
    :param:
        forward_curve: DataFrame representing the forward curve
        gross_issuance: DataFrame containing issuance table
    :return:
        coupon_table: DataFrame in same shape as issuance table containing monthly coupons for each security.
        frn_path: Array containing front of curve values at each month (used to adjust FRNs each period)
    """
    coupon_table = gross_issuance.copy()
    # Drop monthly summation column
    coupon_table.drop("Monthly", axis=1, inplace=True)
    # Generate table of coupon values for each tenor at each month of forward issuance
    for col in range(1, coupon_table.shape[1]):
        temp_coupon_rates = [np.nan] * (
            coupon_table.shape[0] + 1
        )  # +1 to allow for interpolation of final year values
        # Pull forward rates for the specific tenor; if FRNs, pull forward rates for front of curve instead
        if coupon_table.columns[col].split(" ")[-1] == "FRN":
            temp_fwd_rates = forward_curve[forward_curve["Tenor"] == "1M"]
        else:
            temp_fwd_rates = forward_curve[
                forward_curve["Tenor"] == coupon_table.columns[col].split(" ")[0]
            ]
        # Create an array of monthly values (matching issuance length) based on interpolation of forward rates
        # Forward-curve column headers like "6M"/"2Y" are converted to month offsets
        # into the rate array; each known point is pinned at its offset.
        temp_loc = 0
        for i in range(1, temp_fwd_rates.shape[1]):
            if temp_fwd_rates.columns[i][-1] == "M":
                temp_loc = int(temp_fwd_rates.columns[i][:-1])
            elif temp_fwd_rates.columns[i][-1] == "Y":
                temp_loc = int(temp_fwd_rates.columns[i][:-1]) * 12
            if temp_loc < len(temp_coupon_rates):
                temp_coupon_rates[temp_loc] = temp_fwd_rates.iat[0, i]
            else:
                break
        # Linearly interpolate between the pinned points, then drop the extra final slot
        temp_coupon_rates = (
            pd.Series(temp_coupon_rates).interpolate().to_numpy().tolist()[:-1]
        )
        coupon_table.iloc[:, col] = temp_coupon_rates
    # Store FRN path to return at end of method before subsequent calculations
    # NOTE(review): assumes the FRN column is the LAST column of the issuance table — confirm
    frn_path = coupon_table.iloc[:, -1].copy()
    # Adjust coupon table to account for reopenings and month/tenor combinations with 0 issuance
    # NOTE(review): starts at column 5 — presumably the first four tenor columns are never
    # reopened; confirm if the issuance table layout changes.
    for col in range(5, coupon_table.shape[1]):
        for row in range(coupon_table.shape[0]):
            # If there is no issuance for a given tenor/period combination, set coupon to N/A
            if gross_issuance.iat[row, col] == 0:
                coupon_table.iat[row, col] = np.nan
                continue
            # Assign coupons for reopenings to match the originally issued security's coupon
            temp_month = coupon_table.iat[row, 0].date().month
            # 10Y, 20Y, 30Y reopenings (identified by 3-character column names)
            if len(coupon_table.columns[col]) == 3:
                if row > 1 and (temp_month % 3 == 0 or temp_month % 3 == 1):
                    coupon_table.iat[row, col] = coupon_table.iat[row - 1, col]
            # TIPS reopenings (column names ending in "P"); look-back distance depends
            # on how many months after the original issue the reopening month falls
            elif coupon_table.columns[col][-1] == "P":
                if row > 1 and (
                    temp_month == 3
                    or temp_month == 6
                    or temp_month == 9
                    or temp_month == 12
                ):
                    coupon_table.iat[row, col] = coupon_table.iat[row - 2, col]
                elif row > 3 and (temp_month == 5 or temp_month == 11):
                    coupon_table.iat[row, col] = coupon_table.iat[row - 4, col]
                elif row > 5 and temp_month == 8:
                    coupon_table.iat[row, col] = coupon_table.iat[row - 6, col]
            # FRNs reopenings (column names ending in "N")
            elif coupon_table.columns[col][-1] == "N":
                if row > 1 and (temp_month % 3 == 0 or temp_month % 3 == 2):
                    coupon_table.iat[row, col] = coupon_table.iat[row - 1, col]
    return coupon_table, frn_path
def join_with_soma(os, soma_holdings):
    """
    Joins SOMA data with MSPD data on CUSIP to consolidate into one DataFrame.

    Fix vs. prior version: the inflation compensation fold-in is now done on a local
    copy, so the caller's ``soma_holdings`` DataFrame is no longer mutated in place.
    :param:
        os: DataFrame containing outstanding stock of debt from MSPD
        soma_holdings: DataFrame containing current SOMA holdings from Fed report
    :return:
        results: DataFrame joining MSPD and SOMA based on CUSIP, simplified to only show desired columns
        bills_row: Synthetic row containing total outstanding stock of bills as of the model jump-off point
    """
    # Group MSPD by CUSIP and sum amounts outstanding to combine reopenings into a single security
    simple_os = pd.DataFrame(
        os.groupby(["Category", "CUSIP"], sort=False).agg(
            {
                "Interest Rate": ["first"],
                "Issue Date": ["first"],
                "Maturity Date": ["first"],
                "Issued": ["sum"],
                "Outstanding": ["sum"],
            }
        )
    ).reset_index()
    # Flatten the MultiIndex columns produced by agg() into simple labels
    simple_os.columns = [
        "Category",
        "CUSIP",
        "Interest Rate",
        "Issue Date",
        "Maturity Date",
        "Issued",
        "Amt Outstanding",
    ]
    # Add SOMA inflation compensation field onto par value — on a copy, so the
    # caller's DataFrame is left untouched
    soma = soma_holdings.copy()
    soma["Par Value"] = soma[["Par Value", "Inflation Compensation"]].sum(axis=1)
    simple_soma = soma[["CUSIP", "Par Value", "Percent Outstanding"]]
    # Merge SOMA and MSPD DataFrames (outer join to preserve securities not held by Fed)
    results = simple_os.merge(simple_soma, on="CUSIP", how="outer").rename(
        columns={
            "Par Value": "SOMA Par Value",
            "Percent Outstanding": "SOMA Percent Outstanding",
        }
    )
    # Drop securities that existed in SOMA file but not in MSPD (already matured holdings not updated in SOMA yet)
    # For example, given a Dec 30 SOMA and Dec 31 MSPD, drop all securities in SOMA maturing on Dec 31 itself
    results = results[results["Category"].notna()]
    # Securities with no SOMA holding get 0 in the SOMA columns
    results.fillna(0, inplace=True)
    results["Amt Ex-SOMA"] = results["Amt Outstanding"] - results["SOMA Par Value"]
    # Aggregate all bills into a singular "synthetic bills" row, then drop bills from joined table
    bills_row = pd.DataFrame(
        results[results["Category"] == "Bills"].groupby(["Category"], sort=False).sum()
    ).reset_index()
    results = results[results["Category"] != "Bills"].reset_index(drop=True)
    return results, bills_row
def gen_maturity_table(gross_issuance):
    """
    Takes in a projected issuance table and generates a corresponding table of maturity dates for each security.
    Maturity dates are adjusted to reflect Treasury's rules for weekend/holiday settlements.

    Bug fix: the end-of-month adjustment for 20Y bonds previously compared the column
    name's first character (a string) against the integer 2 — always False — so 20Y bond
    maturities were never shifted to month end despite the stated rule. The three
    affected comparisons now test against the string "2".
    :param:
        gross_issuance: DataFrame containing issuance table
    :return:
        maturities: DataFrame in same shape as issuance table containing maturity dates of each future security
    """
    maturities = gross_issuance.copy()
    # Drop summation column
    maturities.drop("Monthly", axis=1, inplace=True)
    # Loops over issuance table by security tenor
    for col in range(1, maturities.shape[1]):
        for row in range(maturities.shape[0]):
            temp_date = maturities.iat[row, 0].date()
            temp_month = temp_date.month
            # Generate maturity dates only where there is issuance
            # Nested if/else statements account for reopenings (if reopening, assign original issue's maturity date)
            if not gross_issuance.iat[row, col] == 0:
                # Notes; 2Y/5Y/7Y mature EOM and 3Y matures mid-month
                if len(gross_issuance.columns[col]) == 2:
                    temp_mat = dt.date(
                        year=(temp_date.year + int(gross_issuance.columns[col][0])),
                        month=temp_date.month,
                        day=temp_date.day,
                    )
                    if gross_issuance.columns[col][0] != "3":
                        temp_mat = dt.date(
                            year=temp_mat.year,
                            month=temp_mat.month,
                            day=(temp_mat + MonthEnd()).day,
                        )
                # Bonds; 20Y matures EOM and 10Y/30Y mature mid-month
                elif len(gross_issuance.columns[col]) == 3:
                    if temp_month % 3 == 0:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row == 0:
                            if temp_date.month > 1:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:2])
                                    ),
                                    month=(temp_date.month - 1),
                                    day=temp_date.day,
                                )
                            else:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:2])
                                    ),
                                    month=(temp_date.month - 1 + 12),
                                    day=temp_date.day,
                                )
                            # 20Y bonds mature EOM (fixed: compare against "2", not int 2)
                            if gross_issuance.columns[col][0] == "2":
                                temp_mat = dt.date(
                                    year=temp_mat.year,
                                    month=temp_mat.month,
                                    day=(temp_mat + MonthEnd()).day,
                                )
                        else:
                            temp_mat = maturities.iat[row - 1, col]
                    elif temp_month % 3 == 1:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row == 0:
                            if temp_date.month > 2:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:2])
                                    ),
                                    month=(temp_date.month - 2),
                                    day=temp_date.day,
                                )
                            else:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:2])
                                    ),
                                    month=(temp_date.month - 2 + 12),
                                    day=temp_date.day,
                                )
                            # 20Y bonds mature EOM (fixed: compare against "2", not int 2)
                            if gross_issuance.columns[col][0] == "2":
                                temp_mat = dt.date(
                                    year=temp_mat.year,
                                    month=temp_mat.month,
                                    day=(temp_mat + MonthEnd()).day,
                                )
                        else:
                            temp_mat = maturities.iat[row - 1, col]
                    else:
                        temp_mat = dt.date(
                            year=(
                                temp_date.year + int(gross_issuance.columns[col][:2])
                            ),
                            month=temp_date.month,
                            day=temp_date.day,
                        )
                        # 20Y bonds mature EOM (fixed: compare against "2", not int 2)
                        if gross_issuance.columns[col][0] == "2":
                            temp_mat = dt.date(
                                year=temp_mat.year,
                                month=temp_mat.month,
                                day=(temp_mat + MonthEnd()).day,
                            )
                # TIPS; all TIPS mature mid-month
                elif gross_issuance.columns[col][-1] == "P":
                    if temp_month % 3 == 0:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row < 2:
                            temp_mat = dt.date(
                                year=(
                                    temp_date.year
                                    + int(gross_issuance.columns[col][:-5])
                                ),
                                month=(temp_date.month - 2),
                                day=temp_date.day,
                            )
                        else:
                            temp_mat = maturities.iat[row - 2, col]
                    elif temp_month == 5 or temp_month == 11:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row < 4:
                            temp_mat = dt.date(
                                year=(
                                    temp_date.year
                                    + int(gross_issuance.columns[col][:-5])
                                ),
                                month=(temp_date.month - 4),
                                day=temp_date.day,
                            )
                        else:
                            temp_mat = maturities.iat[row - 4, col]
                    elif temp_month == 8:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row < 6:
                            temp_mat = dt.date(
                                year=(
                                    temp_date.year
                                    + int(gross_issuance.columns[col][:-5])
                                ),
                                month=(temp_date.month - 6),
                                day=temp_date.day,
                            )
                        else:
                            temp_mat = maturities.iat[row - 6, col]
                    else:
                        temp_mat = dt.date(
                            year=(
                                temp_date.year + int(gross_issuance.columns[col][:-5])
                            ),
                            month=temp_date.month,
                            day=temp_date.day,
                        )
                # FRNs; all FRNs mature EOM
                else:
                    if temp_month % 3 == 0:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row == 0:
                            if temp_date.month > 2:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:-5])
                                    ),
                                    month=(temp_date.month - 2),
                                    day=temp_date.day,
                                )
                            else:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:-5])
                                    ),
                                    month=(temp_date.month - 2 + 12),
                                    day=temp_date.day,
                                )
                            temp_mat = dt.date(
                                year=temp_mat.year,
                                month=temp_mat.month,
                                day=(temp_mat + MonthEnd()).day,
                            )
                        else:
                            temp_mat = maturities.iat[row - 1, col]
                    elif temp_month % 3 == 2:
                        # Handles case where first occurrence of a security in the issuance table is a reopening
                        if row == 0:
                            if temp_date.month > 1:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:-5])
                                    ),
                                    month=(temp_date.month - 1),
                                    day=temp_date.day,
                                )
                            else:
                                temp_mat = dt.date(
                                    year=(
                                        temp_date.year
                                        + int(gross_issuance.columns[col][:-5])
                                    ),
                                    month=(temp_date.month - 1 + 12),
                                    day=temp_date.day,
                                )
                            temp_mat = dt.date(
                                year=temp_mat.year,
                                month=temp_mat.month,
                                day=(temp_mat + MonthEnd()).day,
                            )
                        else:
                            temp_mat = maturities.iat[row - 1, col]
                    else:
                        temp_mat = dt.date(
                            year=(
                                temp_date.year + int(gross_issuance.columns[col][:-5])
                            ),
                            month=temp_date.month,
                            day=temp_date.day,
                        )
                        temp_mat = dt.date(
                            year=temp_mat.year,
                            month=temp_mat.month,
                            day=(temp_mat + MonthEnd()).day,
                        )
                temp_mat = (
                    temp_mat + 0 * BDay()
                ).date()  # Set maturity date to next business day if weekend/holiday
                maturities.iat[row, col] = temp_mat
    # For months with 0 issuance, set maturity date to N/A
    maturities.replace(0, np.nan, inplace=True)
    return maturities
def gen_future_debt_stock(gross_issuance, coupon_table):
    """
    Takes in a table of projected issuance and generates a table of all the implied future securities. These are
    formatted to match the structure of securities in the MSPD.

    Column 0 of the issuance table holds the issuance dates (pandas Timestamps); the remaining columns are
    tenor labels (two characters for notes, three for bonds, ending in "P" for TIPS).

    :param:
    gross_issuance: DataFrame containing issuance table
    coupon_table: DataFrame in same shape as issuance table containing coupons to be assigned to each tenor
    :return:
    future_stock_df: DataFrame containing all future securities implied by the issuance table in MSPD form
    """
    # Accumulates one dict per future security (MSPD-style rows).
    future_stock = []
    # Maturity dates implied by the issuance table, same shape as gross_issuance.
    maturity_table = gen_maturity_table(gross_issuance)
    # Loops over issuance table by column/security type and generate a new CUSIP security row for each entry
    # NOTE(review): range stops at shape[1] - 1, so the last column is skipped
    # here — confirm it is handled elsewhere (or is intentionally excluded).
    for col in range(1, gross_issuance.shape[1] - 1):
        for row in range(gross_issuance.shape[0]):
            # Column 0 holds the issuance date; convert Timestamp -> datetime.date.
            temp_date = gross_issuance.iat[row, 0].date()
            temp_month = temp_date.month
            temp_issue = temp_date
            # Synthetic placeholder CUSIP, unique per (row, col) pair.
            # NOTE(review): plain string concatenation can collide, e.g.
            # row=1,col=12 and row=11,col=2 both give "CUSIP112" — confirm the
            # table dimensions keep this unambiguous.
            temp_cusip = "CUSIP" + str(row) + str(col)
            temp_coupon = coupon_table.iat[row, col]
            temp_mat = maturity_table.iat[row, col]
            # Generate future securities only where there is issuance
            # Nested if/else statements account for reopenings (if reopening, assign original issue's CUSIP/maturity)
            if not (gross_issuance.iat[row, col] == 0):
                # Notes (2Y, 5Y, 7Y); 3Y notes are issued mid-month, 2Y/5Y/7Y notes are issued EOM
                if len(gross_issuance.columns[col]) == 2:
                    temp_cat = "Notes"
                    if gross_issuance.columns[col][0] != "3":
                        # Non-3Y notes: move the issue date to end-of-month...
                        temp_issue = dt.date(
                            year=temp_date.year,
                            month=temp_date.month,
                            day=(temp_date + MonthEnd()).day,
                        )
                        # ...then roll to the next business day if needed.
                        temp_issue = (temp_issue + 0 * BDay()).date()
                # Bonds (10Y, 20Y, 30Y); 10Y/30Y bonds are issued mid-month, 20Y bonds are issued EOM
                elif len(gross_issuance.columns[col]) == 3:
                    temp_cat = "Bonds"
                    # Handles case where first occurrence of nominal bond is a reopening
                    if temp_month % 3 == 0 or temp_month % 3 == 1:
                        # Reopening: reuse the CUSIP of the most recently
                        # appended security.
                        # NOTE(review): assumes the immediately preceding
                        # append was this bond's original issue, and raises
                        # IndexError if future_stock is still empty — confirm
                        # iteration order guarantees both.
                        temp_cusip = future_stock[-1].get("CUSIP")
                    if gross_issuance.columns[col][:2] == "20":
                        # 20Y bonds: EOM issue date, rolled to next business day.
                        temp_issue = dt.date(
                            year=temp_date.year,
                            month=temp_date.month,
                            day=(temp_date + MonthEnd()).day,
                        )
                        temp_issue = (temp_issue + 0 * BDay()).date()
                # TIPS; all TIPS are issued on the last business day of the month
                elif gross_issuance.columns[col][-1] == "P":
                    temp_cat = "TIPS"
                    # Handles case where first occurrence of a TIPS is a reopening
                    if not (temp_month == 1 or temp_month == 2 or temp_month == 7):
                        # Reopening month: reuse the last appended security's
                        # CUSIP (same caveats as the bond branch above).
                        temp_cusip = future_stock[-1].get("CUSIP")