This notebook offers a step-by-step guide to building a hierarchical forecasting pipeline.

In the pipeline we will use the StatsForecast and HierarchicalForecast core classes to create base predictions, reconcile them, and evaluate the reconciled forecasts.

We will use the TourismSmall dataset, which summarizes the Australian National Visitor Survey (the larger TourismLarge variant can also be selected in the code below).

Outline
1. Installing Packages
2. Prepare the TourismSmall dataset
   - Read and aggregate
   - StatsForecast's base predictions
3. Reconcile predictions
4. Evaluate reconciled predictions


1. Installing HierarchicalForecast

We assume you have StatsForecast and HierarchicalForecast already installed. If not, check this guide for instructions on how to install HierarchicalForecast.

# %%capture
# !pip install hierarchicalforecast
# !pip install -U numba statsforecast datasetsforecast
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

from statsforecast.core import StatsForecast
from statsforecast.models import AutoARIMA, Naive

from hierarchicalforecast.core import HierarchicalReconciliation
from hierarchicalforecast.evaluation import HierarchicalEvaluation
from hierarchicalforecast.methods import BottomUp, TopDown, MinTrace, ERM

from hierarchicalforecast.utils import is_strictly_hierarchical
from hierarchicalforecast.utils import HierarchicalPlot, CodeTimer
from hierarchicalforecast.evaluation import scaled_crps, msse, energy_score

from datasetsforecast.hierarchical import HierarchicalData, HierarchicalInfo

2. Preparing the TourismSmall Dataset

2.1 Read Hierarchical Dataset

# ['Labour', 'Traffic', 'TourismSmall', 'TourismLarge', 'Wiki2']
dataset = 'TourismSmall' # 'TourismLarge'
verbose = True
intervals_method = 'bootstrap'
LEVEL = np.arange(0, 100, 2)
qs = [[50-lv/2, 50+lv/2] for lv in LEVEL]
QUANTILES = np.sort(np.concatenate(qs)/100)
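Each confidence level lv in LEVEL is translated into the symmetric quantile pair (50 - lv/2)% and (50 + lv/2)%. A minimal, illustrative sanity check reusing the variables defined above:

# Illustrative check (assumes LEVEL and QUANTILES defined above):
# each level lv maps to the quantile pair (50 - lv/2)% and (50 + lv/2)%,
# e.g. level 80 -> the 10% and 90% quantiles.
lv = 80
print((50 - lv / 2) / 100, (50 + lv / 2) / 100)  # 0.1 0.9
print(QUANTILES.min(), QUANTILES.max())          # 0.01 0.99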
with CodeTimer('Read and Parse data   ', verbose):
    print(f'{dataset}')
    if not os.path.exists('./data'):
        os.makedirs('./data')
    
    dataset_info = HierarchicalInfo[dataset]
    Y_df, S_df, tags = HierarchicalData.load(directory=f'./data/{dataset}', group=dataset)
    Y_df['ds'] = pd.to_datetime(Y_df['ds'])

    # Train/Test Splits
    horizon = dataset_info.horizon
    seasonality = dataset_info.seasonality
    Y_test_df = Y_df.groupby('unique_id').tail(horizon)
    Y_train_df = Y_df.drop(Y_test_df.index)
    Y_test_df = Y_test_df.set_index('unique_id')
    Y_train_df = Y_train_df.set_index('unique_id')
TourismSmall
100%|██████████| 1.30M/1.30M [00:00<00:00, 2.74MiB/s]
INFO:datasetsforecast.utils:Successfully downloaded datasets.zip, 1297279, bytes.
INFO:datasetsforecast.utils:Decompressing zip file...
INFO:datasetsforecast.utils:Successfully decompressed data/TourismSmall/hierarchical/datasets.zip
Code block 'Read and Parse data   ' took:   0.99873 seconds
dataset_info.seasonality
4
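Before plotting, it can help to inspect the aggregation structure: S_df is the summing matrix relating every series to the bottom-level series, and tags maps each hierarchy level to its series identifiers. A quick, optional inspection (output depends on the chosen dataset):

# Optional inspection of the hierarchical structure
print('S_df shape:', S_df.shape)               # (total series, bottom-level series)
print('hierarchy levels:', list(tags.keys()))  # e.g. 'Country', 'Country/Purpose', ...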
hplot = HierarchicalPlot(S=S_df, tags=tags)
hplot.plot_summing_matrix()

Y_train_df
unique_id        ds           y
total            1998-03-31   84503
total            1998-06-30   65312
total            1998-09-30   72753
total            1998-12-31   70880
total            1999-03-31   86893
...              ...          ...
nt-oth-noncity   2003-12-31   132
nt-oth-noncity   2004-03-31   12
nt-oth-noncity   2004-06-30   40
nt-oth-noncity   2004-09-30   186
nt-oth-noncity   2004-12-31   144

2.2 StatsForecast’s Base Predictions

This cell computes the base predictions Y_hat_df for all the series in Y_df using StatsForecast's AutoARIMA. Additionally, we obtain the in-sample predictions Y_fitted_df needed by the reconciliation methods that require them.

with CodeTimer('Fit/Predict Model     ', verbose):
    # Read cached results to avoid unnecessary AutoARIMA computation
    yhat_file = f'./data/{dataset}/Y_hat.csv'
    yfitted_file = f'./data/{dataset}/Y_fitted.csv'

    if os.path.exists(yhat_file):
        Y_hat_df = pd.read_csv(yhat_file)
        Y_fitted_df = pd.read_csv(yfitted_file)

        Y_hat_df = Y_hat_df.set_index('unique_id')
        Y_fitted_df = Y_fitted_df.set_index('unique_id')

    else:
        fcst = StatsForecast(
            df=Y_train_df, 
            models=[AutoARIMA(season_length=seasonality)],
            fallback_model=Naive(),
            freq='M', 
            n_jobs=-1
        )
        Y_hat_df = fcst.forecast(h=horizon, fitted=True, level=LEVEL)
        Y_fitted_df = fcst.forecast_fitted_values()
        Y_hat_df.to_csv(yhat_file)
        Y_fitted_df.to_csv(yfitted_file)
Y_hat_df
(Columns: ds, AutoARIMA, plus prediction-interval columns AutoARIMA-lo-98, …, AutoARIMA-hi-98; only the point forecasts are reproduced here.)

unique_id        ds           AutoARIMA
bus              2005-01-31   9673.424805
bus              2005-02-28   10393.900391
bus              2005-03-31   12028.134766
bus              2005-04-30   10995.679688
bus              2005-05-31   9673.424805
...              ...          ...
wa-vfr-noncity   2005-04-30   904.125549
wa-vfr-noncity   2005-05-31   904.125549
wa-vfr-noncity   2005-06-30   904.125549
wa-vfr-noncity   2005-07-31   904.125549
wa-vfr-noncity   2005-08-31   904.125549
Y_fitted_df
(Columns: ds, y, AutoARIMA, plus prediction-interval columns AutoARIMA-lo-98, …, AutoARIMA-hi-98; only the actuals and fitted values are reproduced here.)

unique_id        ds           y        AutoARIMA
bus              1998-03-31   9815.0   9805.184570
bus              1998-06-30   11823.0  11811.176758
bus              1998-09-30   13565.0  13551.434570
bus              1998-12-31   11478.0  11466.522461
bus              1999-03-31   10027.0  9845.011719
...              ...          ...      ...
wa-vfr-noncity   2003-12-31   1177.0   927.351196
wa-vfr-noncity   2004-03-31   956.0    969.565552
wa-vfr-noncity   2004-06-30   772.0    967.268921
wa-vfr-noncity   2004-09-30   885.0    934.251831
wa-vfr-noncity   2004-12-31   797.0    925.923462

3. Reconcile Predictions
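The TopDown reconcilers are only valid for strictly hierarchical structures (every series aggregates into exactly one parent per level), so the cell below adds them only when is_strictly_hierarchical returns True; for grouped structures it falls back to BottomUp, MinTrace, and ERM. A stand-alone version of that check, reusing the utility imported above (TourismSmall passes it):

# Stand-alone structure check (same arguments as in the cell below)
strictly_hierarchical = is_strictly_hierarchical(
    S=S_df.values.astype(np.float32),
    tags={key: S_df.index.get_indexer(val) for key, val in tags.items()},
)
print('strictly hierarchical:', strictly_hierarchical)  # True for TourismSmall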

with CodeTimer('Reconcile Predictions ', verbose):
    if is_strictly_hierarchical(S=S_df.values.astype(np.float32), 
        tags={key: S_df.index.get_indexer(val) for key, val in tags.items()}):
        reconcilers = [
            BottomUp(),
            TopDown(method='average_proportions'),
            TopDown(method='proportion_averages'),
            MinTrace(method='ols'),
            MinTrace(method='wls_var'),
            MinTrace(method='mint_shrink'),
            # ERM(method='reg_bu', lambda_reg=100) # Extremely inefficient
            ERM(method='closed')
        ]
    else:
        reconcilers = [
            BottomUp(),
            MinTrace(method='ols'),
            MinTrace(method='wls_var'),
            MinTrace(method='mint_shrink'),
            # ERM(method='reg_bu', lambda_reg=100) # Extremely inefficient
            ERM(method='closed')
        ]
    
    hrec = HierarchicalReconciliation(reconcilers=reconcilers)
    Y_rec_df = hrec.bootstrap_reconcile(Y_hat_df=Y_hat_df,
                                        Y_df=Y_fitted_df,
                                        S_df=S_df, tags=tags,
                                        level=LEVEL,
                                        intervals_method=intervals_method,
                                        num_samples=10, num_seeds=10)

    # Matching Y_test/Y_rec/S index ordering
    Y_test_df = Y_test_df.reset_index()
    Y_test_df.unique_id = Y_test_df.unique_id.astype('category')
    Y_test_df.unique_id = Y_test_df.unique_id.cat.set_categories(S_df.index)
    Y_test_df = Y_test_df.sort_values(by=['unique_id', 'ds'])

    Y_rec_df = Y_rec_df.reset_index()
    Y_rec_df.unique_id = Y_rec_df.unique_id.astype('category')
    Y_rec_df.unique_id = Y_rec_df.unique_id.cat.set_categories(S_df.index)
    Y_rec_df = Y_rec_df.sort_values(by=['seed', 'unique_id', 'ds'])

    # Parsing model level columns
    flat_cols = list(hrec.level_names.keys())
    for model in hrec.level_names:
        flat_cols += hrec.level_names[model]
    for model in hrec.sample_names:
        flat_cols += hrec.sample_names[model]
    y_rec  = Y_rec_df[flat_cols]
    model_columns = y_rec.columns

    n_series = len(S_df)
    n_seeds = len(Y_rec_df.seed.unique())
    y_rec  = y_rec.values.reshape(n_seeds, n_series, horizon, len(model_columns))
    y_test = Y_test_df['y'].values.reshape(n_series, horizon)
    y_train = Y_train_df['y'].values.reshape(n_series, -1)
Code block 'Reconcile Predictions ' took:   11.73492 seconds
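The reconciled outputs are reshaped into a four-dimensional array indexed by [seed, series, horizon, column], which makes the per-seed evaluation below straightforward. A quick, illustrative shape check (exact sizes depend on the dataset and the number of seeds):

# Illustrative shape check of the reshaped arrays
print('y_rec  :', y_rec.shape)    # (n_seeds, n_series, horizon, n_model_columns)
print('y_test :', y_test.shape)   # (n_series, horizon)
print('y_train:', y_train.shape)  # (n_series, train_length)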
# Qualitative evaluation of parsed quantiles
row_idx = 0
seed_idx = 0
col_idxs = model_columns.get_indexer(hrec.level_names['AutoARIMA/BottomUp'])
for i, col in enumerate(col_idxs):
    plt.plot(y_rec[seed_idx, row_idx,:,col], color='orange', alpha=i/100)
for i, col in enumerate(col_idxs):
    plt.plot(y_rec[seed_idx+1, row_idx,:,col], color='green', alpha=i/100)
plt.plot(y_test[row_idx,:], label='True')
plt.title(f'{S_df.index[row_idx]} Visits \n' + \
          f'AutoARIMA/BottomUp-{intervals_method}')

plt.legend()
plt.grid()
plt.show()
plt.close()

#Y_rec_df
td_levels = hrec.level_names['AutoARIMA/TopDown_method-average_proportions']
Y_rec_df[td_levels]
(Y_rec_df[td_levels]: 7,120 rows of reconciled quantile columns for AutoARIMA/TopDown_method-average_proportions, from -lo-98 through -hi-98; one row per bootstrap seed, series, and horizon step.)

4. Evaluation
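The sCRPS reported below is computed from the reconciled quantiles: conceptually it averages the pinball (quantile) loss across all quantiles and scales it by the magnitude of the target series, so scores are comparable across hierarchy levels. A rough conceptual sketch (not the library implementation; shapes follow the reshaped y_test and y_rec above):

import numpy as np

def scaled_crps_sketch(y, y_hat, quantiles):
    """Conceptual sketch of a quantile-based, scale-normalized CRPS.
    y: [series, horizon], y_hat: [series, horizon, n_quantiles]."""
    diff = y[:, :, None] - y_hat
    pinball = np.maximum(quantiles * diff, (quantiles - 1.0) * diff)
    # average pinball loss, normalized by the mean absolute target value
    return 2 * pinball.mean() / np.abs(y).mean()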

with CodeTimer('Evaluate Models CRPS  ', verbose):
    crps_results = {'Dataset': [dataset] * len(['Overall'] + list(tags.keys())),
                    'Level': ['Overall'] + list(tags.keys()),}

    for model in hrec.level_names.keys():
        crps_results[model] = []
        for level in crps_results['Level']:
            if level=='Overall':
                row_idxs = np.arange(len(S_df))
            else:
                row_idxs = S_df.index.get_indexer(tags[level])
            col_idxs = model_columns.get_indexer(hrec.level_names[model])
            _y = y_test[row_idxs,:]
            _y_rec_seeds = y_rec[:,row_idxs,:,:][:,:,:,col_idxs]

            level_model_crps = []
            for seed_idx in range(y_rec.shape[0]):
                _y_rec = _y_rec_seeds[seed_idx,:,:,:]
                level_model_crps.append(scaled_crps(y=_y, y_hat=_y_rec,
                                                    quantiles=QUANTILES))
            level_model_crps = f'{np.mean(level_model_crps):.4f}±{(1.96 * np.std(level_model_crps)):.4f}'
            crps_results[model].append(level_model_crps)

    crps_results = pd.DataFrame(crps_results)

crps_results
Code block 'Evaluate Models CRPS  ' took:   1.13514 seconds
Dataset: TourismSmall. Values are mean ± 1.96·std of sCRPS across the 10 bootstrap seeds. Model columns (all reconciling AutoARIMA base forecasts): BottomUp, TopDown-avg (average_proportions), TopDown-prop (proportion_averages), MinT-ols, MinT-wls_var, MinT-shrink (mint_shrink), ERM-closed (lambda_reg=0.01).

Level                                BottomUp        TopDown-avg     TopDown-prop    MinT-ols        MinT-wls_var    MinT-shrink     ERM-closed
Overall                              0.0895±0.0012   0.1195±0.0008   0.1197±0.0008   0.0927±0.0010   0.0890±0.0010   0.0898±0.0009   0.1116±0.0015
Country                              0.0481±0.0016   0.0479±0.0011   0.0479±0.0011   0.0504±0.0010   0.0510±0.0011   0.0512±0.0011   0.0525±0.0015
Country/Purpose                      0.0699±0.0016   0.0928±0.0009   0.0931±0.0009   0.0804±0.0012   0.0724±0.0012   0.0741±0.0012   0.0927±0.0015
Country/Purpose/State                0.1085±0.0011   0.1575±0.0009   0.1579±0.0009   0.1082±0.0011   0.1043±0.0009   0.1049±0.0008   0.1325±0.0018
Country/Purpose/State/CityNonCity    0.1316±0.0012   0.1799±0.0008   0.1800±0.0008   0.1319±0.0013   0.1282±0.0011   0.1290±0.0010   0.1685±0.0029
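The MSSE (mean squared scaled error) compares squared forecast errors against the in-sample squared errors of a naive benchmark, making the values comparable across series of different scales. A conceptual sketch of the idea (not necessarily the exact msse implementation; assumes y and y_hat of shape [series, horizon] and y_train of shape [series, train_length]):

import numpy as np

def msse_sketch(y, y_hat, y_train):
    """Conceptual sketch: squared forecast error scaled by the squared error
    of a one-step naive forecast computed on the training data."""
    naive_mse = np.mean((y_train[:, 1:] - y_train[:, :-1]) ** 2)
    return np.mean((y - y_hat) ** 2) / naive_mse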
with CodeTimer('Evaluate Models MSSE  ', verbose):
    msse_results = {'Dataset': [dataset] * len(['Overall'] + list(tags.keys())),
                    'Level': ['Overall'] + list(tags.keys()),}
    for model in hrec.level_names.keys():
        msse_results[model] = []
        for level in msse_results['Level']:
            if level=='Overall':
                row_idxs = np.arange(len(S_df))
            else:
                row_idxs = S_df.index.get_indexer(tags[level])
            col_idx = model_columns.get_loc(model)
            _y = y_test[row_idxs,:]
            _y_train = y_train[row_idxs,:]
            _y_hat_seeds = y_rec[:,row_idxs,:,:][:,:,:,col_idx]

            level_model_msse = []
            for seed_idx in range(y_rec.shape[0]):
                _y_hat = _y_hat_seeds[seed_idx,:,:]
                level_model_msse.append(msse(y=_y, y_hat=_y_hat, y_train=_y_train))
            #level_model_msse = f'{np.mean(level_model_msse):.4f}±{(1.96 * np.std(level_model_msse)):.4f}'
            level_model_msse = f'{np.mean(level_model_msse):.4f}'
            msse_results[model].append(level_model_msse)

    msse_results = pd.DataFrame(msse_results)

msse_results
Code block 'Evaluate Models MSSE  ' took:   0.73303 seconds
Dataset: TourismSmall. Mean MSSE across the 10 bootstrap seeds (model columns as in the sCRPS table above).

Level                                BottomUp   TopDown-avg   TopDown-prop   MinT-ols   MinT-wls_var   MinT-shrink   ERM-closed
Overall                              0.2530     0.3628        0.3649         0.3039     0.2789         0.2822        0.3942
Country                              0.2564     0.3180        0.3180         0.3522     0.3381         0.3394        0.4117
Country/Purpose                      0.2018     0.3178        0.3203         0.2557     0.2122         0.2175        0.3346
Country/Purpose/State                0.3231     0.5077        0.5114         0.2943     0.2858         0.2890        0.4534
Country/Purpose/State/CityNonCity    0.3423     0.5047        0.5099         0.3238     0.3083         0.3115        0.4791
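The energy score is a multivariate generalization of CRPS. It is estimated from two independent sets of forecast samples X and X' as E||y - X||^beta - 0.5 * E||X - X'||^beta, which is why the cell below splits the sample columns in half. A conceptual sketch of such an estimator (not the library implementation; y of shape [series, horizon], samples of shape [series, horizon, n_samples]):

import numpy as np

def energy_score_sketch(y, y_sample1, y_sample2, beta=2):
    """Conceptual sketch of a sample-based energy score estimator."""
    # distance between observations and the first set of forecast samples
    term1 = np.mean(np.linalg.norm(y[:, :, None] - y_sample1, axis=1) ** beta)
    # distance between the two independent sets of forecast samples
    term2 = np.mean(np.linalg.norm(y_sample1 - y_sample2, axis=1) ** beta)
    return term1 - 0.5 * term2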
with CodeTimer('Evaluate Models EScore', verbose):
    energy_results = {'Dataset': [dataset] * len(['Overall'] + list(tags.keys())),
                        'Level': ['Overall'] + list(tags.keys()),}
    for model in hrec.sample_names.keys():
        energy_results[model] = []
        for level in energy_results['Level']:
            if level=='Overall':
                row_idxs = np.arange(len(S_df))
            else:
                row_idxs = S_df.index.get_indexer(tags[level])
            col_idxs = model_columns.get_indexer(hrec.sample_names[model])
            _y = y_test[row_idxs,:]
            _y_sample1 = y_rec[0,row_idxs,:,:][:,:,col_idxs[:len(col_idxs)//2]]
            _y_sample2 = y_rec[0,row_idxs,:,:][:,:,col_idxs[len(col_idxs)//2:]]
            level_model_energy = energy_score(y=_y, 
                                              y_sample1=_y_sample1,
                                              y_sample2=_y_sample2,
                                              beta=2)
            energy_results[model].append(level_model_energy)
    energy_results = pd.DataFrame(energy_results)

energy_results
Code block 'Evaluate Models EScore' took:   0.19443 seconds
Dataset: TourismSmall. Energy score (beta=2), computed from the first bootstrap seed's samples (model columns as in the sCRPS table above).

Level                                BottomUp      TopDown-avg   TopDown-prop  MinT-ols      MinT-wls_var  MinT-shrink   ERM-closed
Overall                              6.874103e+07  7.917294e+07  7.962361e+07  6.930268e+07  6.914837e+07  6.955018e+07  8.235776e+07
Country                              3.292999e+07  2.757131e+07  2.757129e+07  3.081254e+07  3.392861e+07  3.353851e+07  3.350023e+07
Country/Purpose                      1.894485e+07  2.661024e+07  2.683828e+07  2.218952e+07  1.932895e+07  1.984161e+07  2.681792e+07
Country/Purpose/State                9.393103e+06  1.408613e+07  1.419471e+07  9.016056e+06  8.778983e+06  8.928542e+06  1.211747e+07
Country/Purpose/State/CityNonCity    7.473085e+06  1.090527e+07  1.101934e+07  7.284562e+06  7.111832e+06  7.241519e+06  9.922145e+06

References