Models Module

The Models module provides a variety of actuarial and statistical modelling tools for reinsurance pricing. It includes submodules for experience rating, exposure rating, aggregate features modelling, and trending.

Submodules

Experience Rating

The Experience module provides tools for analyzing historical claims experience:

  • burn_cost: Methods for burn cost analysis
  • curve_fitting: Functions for fitting curves to data
  • frequency_severity: Tools for frequency-severity modeling
  • pareto_rating: Pareto distribution-based rating methods
  • resampling: Methods for resampling claims data

Exposure Rating

The Exposure module provides tools for exposure-based rating:

  • mbbefd: Implementation of the MBBEFD (Maxwell-Boltzmann, Bose-Einstein, Fermi-Dirac) distribution
  • mixed_exponential: Implementation of the Mixed Exponential distribution
  • pareto_ilf: Pareto distribution-based Increased Limit Factors
  • riebesell: Implementation of the Riebesell approximation

Aggregate Features

The AggregateFeatures module provides tools for modelling aggregate loss distributions:

  • aggregate_features: Methods for calculating aggregate loss statistics
  • selections: Tools for selecting and combining models
  • simulation_engine: Monte Carlo simulation engine for aggregate losses

Trending

The trending module provides tools for trending historical data to current levels.

Examples

Experience Rating

Burn Cost Analysis

from pyre.Models.Experience.burn_cost import BurnCostModel, ProjectionMethods

# Assuming we have an ExperienceModelData instance called 'model_data'
# and an identifier 'layer_id' for the contract layer being priced

# Create a burn cost model with the default settings (equal year weights,
# SIMPLE_CAPE_COD projection, no development pattern, no a priori assumption)
model = BurnCostModel(model_data, layer_id)

# Calculate the burn cost for each modelling year
burn_costs = model.calculate_burn_cost()
for year, burn_cost in burn_costs.items():
    print(f"{year}: burn cost = {burn_cost:.4f}")

# Override the defaults, e.g. chain ladder with a 10% development uplift
model.projection_methods = {year: ProjectionMethods.CHAINLADDER
                            for year in model.modelling_years}
model.development_pattern = {year: 1.1 for year in model.modelling_years}
print(model.calculate_burn_cost())

Frequency-Severity Modeling

Note: the frequency-severity API is still under development (see severity_fit in the API reference); the FrequencySeverityModel interface shown below is illustrative.

from pyre.Models.Experience.frequency_severity import FrequencySeverityModel
import numpy as np
import matplotlib.pyplot as plt

# Assuming we have a Claims collection called 'claims_data'

# Create a frequency-severity model
model = FrequencySeverityModel(claims_data)

# Fit frequency distribution (Poisson)
freq_params = model.fit_frequency_distribution("poisson")
print(f"Frequency distribution parameters: {freq_params}")

# Fit severity distribution (Lognormal)
sev_params = model.fit_severity_distribution("lognormal")
print(f"Severity distribution parameters: {sev_params}")

# Simulate aggregate losses
num_simulations = 10000
aggregate_losses = model.simulate_aggregate_losses(num_simulations)

# Calculate statistics
mean_loss = np.mean(aggregate_losses)
median_loss = np.median(aggregate_losses)
var_95 = np.percentile(aggregate_losses, 95)
var_99 = np.percentile(aggregate_losses, 99)

print(f"Mean aggregate loss: {mean_loss}")
print(f"Median aggregate loss: {median_loss}")
print(f"95% VaR: {var_95}")
print(f"99% VaR: {var_99}")

# Plot histogram of aggregate losses
plt.figure(figsize=(10, 6))
plt.hist(aggregate_losses, bins=50, alpha=0.7)
plt.axvline(mean_loss, color='r', linestyle='--', label=f'Mean: {mean_loss:.2f}')
plt.axvline(var_95, color='g', linestyle='--', label=f'95% VaR: {var_95:.2f}')
plt.axvline(var_99, color='b', linestyle='--', label=f'99% VaR: {var_99:.2f}')
plt.legend()
plt.title('Simulated Aggregate Loss Distribution')
plt.xlabel('Aggregate Loss')
plt.ylabel('Frequency')
plt.show()

Exposure Rating

Increased Limit Factors

from pyre.Models.Exposure.pareto_ilf import ParetoILF
import numpy as np
import matplotlib.pyplot as plt

# Create a Pareto ILF model
alpha = 2.0  # Pareto shape parameter
model = ParetoILF(alpha)

# Calculate increased limit factors
base_limit = 1000000
limits = [1000000, 2000000, 5000000, 10000000, 25000000]
ilfs = [model.calculate_ilf(base_limit, limit) for limit in limits]

print("Increased Limit Factors:")
for limit, ilf in zip(limits, ilfs):
    print(f"  {limit/1000000}M: {ilf:.4f}")

# Calculate layer costs
attachment = 5000000
limit = 5000000
layer_cost = model.calculate_layer_cost(attachment, attachment + limit)
print(f"Layer cost for {attachment/1000000}M xs {limit/1000000}M: {layer_cost:.4f}")

# Plot ILF curve
limits_plot = np.linspace(base_limit, 25000000, 100)
ilfs_plot = [model.calculate_ilf(base_limit, limit) for limit in limits_plot]

plt.figure(figsize=(10, 6))
plt.plot(limits_plot/1000000, ilfs_plot)
plt.scatter([l/1000000 for l in limits], ilfs, color='red', s=50)
plt.title('Pareto Increased Limit Factors')
plt.xlabel('Limit (Millions)')
plt.ylabel('ILF')
plt.grid(True)
plt.show()
Trending

from pyre.Models.trending import Trending
from pyre.claims.claims import Claims, Claim, ClaimsMetaData, ClaimDevelopmentHistory
from pyre.exposures.exposures import Exposures, Exposure, ExposureMetaData, ExposureValues
from datetime import date

# Create sample trend factors
exposure_trend_factors = {
    2018: 1.05,
    2019: 1.04,
    2020: 1.03,
    2021: 1.02,
    2022: 1.01
}

claim_trend_factors = {
    2018: 1.06,
    2019: 1.05,
    2020: 1.04,
    2021: 1.03,
    2022: 1.02
}

# Create a Trending instance
base_year = 2023
trending = Trending(
    exposure_trend_factors=exposure_trend_factors,
    claim_trend_factors=claim_trend_factors,
    base_year=base_year
)

# Create sample exposures
exposures_list = [
    Exposure(
        ExposureMetaData(
            exposure_id=f"EXP{year}",
            exposure_name=f"Exposure {year}",
            exposure_period_start=date(year, 1, 1),
            exposure_period_end=date(year, 12, 31),
            currency="USD"
        ),
        ExposureValues(
            exposure_value=1000 * (1 + 0.1 * (year - 2018)),
            attachment_point=0,
            limit=0
        )
    )
    for year in range(2018, 2023)
]

# Create sample claims
claims_list = [
    Claim(
        ClaimsMetaData(
            claim_id=f"CL{year}",
            currency="USD",
            loss_date=date(year, 6, 15)
        ),
        ClaimDevelopmentHistory(
            development_months=[0, 12],
            cumulative_dev_paid=[0, 500 * (1 + 0.1 * (year - 2018))],
            cumulative_dev_incurred=[1000 * (1 + 0.1 * (year - 2018)), 800 * (1 + 0.1 * (year - 2018))]
        )
    )
    for year in range(2018, 2023)
]

# Create collections
exposures = Exposures(exposures_list)
claims = Claims(claims_list)

# Trend the exposures and claims to the base year
trended_exposures = trending.trend_exposures(exposures)
trended_claims = trending.trend_claims(claims)

# Print original and trended values
print("Original vs. Trended Exposures:")
for i, (orig, trended) in enumerate(zip(exposures, trended_exposures)):
    year = 2018 + i
    orig_value = orig.exposure_values.exposure_value
    trended_value = trended.exposure_values.exposure_value
    trend_factor = trending.calculate_trend_factor(year, for_claims=False)
    print(f"  {year}: Original=${orig_value:.2f}, Trended=${trended_value:.2f}, Factor={trend_factor:.4f}")

print("\nOriginal vs. Trended Claims (Latest Incurred):")
for i, (orig, trended) in enumerate(zip(claims, trended_claims)):
    year = 2018 + i
    orig_value = orig.uncapped_claim_development_history.latest_incurred()
    trended_value = trended.uncapped_claim_development_history.latest_incurred()
    trend_factor = trending.calculate_trend_factor(year, for_claims=True)
    print(f"  {year}: Original=${orig_value:.2f}, Trended=${trended_value:.2f}, Factor={trend_factor:.4f}")

# Get the trend factors
trend_factors = trending.get_trend_factors()
print("\nTrend Factors:")
print("  Exposure Trend Factors:", trend_factors['exposure'])
print("  Claim Trend Factors:", trend_factors['claim'])

# For backward compatibility, you can also use standalone functions
from pyre.Models.trending import calculate_trend_factor, trend_exposures, trend_claims

# Calculate a trend factor directly
origin_year = 2020
direct_trend_factor = calculate_trend_factor(origin_year, base_year, exposure_trend_factors)
print(f"\nDirect trend factor from {origin_year} to {base_year}: {direct_trend_factor:.4f}")

API Reference

BurnCostModel

BurnCostModel is a class that calculates burn costs for a reinsurance contract layer.

It uses various projection methods to estimate ultimate claims and calculate burn costs based on historical claims and exposures data.

Attributes:

  • _modelling_years (List[int]): The years to be used in the modelling.
  • _years_weighting (Dict[int, float]): Weighting factors for each modelling year.
  • _projection_methods (Dict[int, ProjectionMethods]): Projection method to use for each modelling year.
  • _development_pattern (Dict[int, float]): Development factors for each modelling year.
  • _a_priori (Dict[int, float]): A priori expected loss ratios for each modelling year.
  • _data (ExperienceModelData): The claims and exposures data for the reinsurance contract.
  • _layer_id (Any): The identifier for the reinsurance contract layer.

Source code in src\pyre\Models\Experience\burn_cost.py
class BurnCostModel:
    """
    BurnCostModel is a class that calculates burn costs for a reinsurance contract layer.

    It uses various projection methods to estimate ultimate claims and calculate burn costs
    based on historical claims and exposures data.

    Attributes:
        _modelling_years (List[int]): The years to be used in the modelling.
        _years_weighting (Dict[int, float]): Weighting factors for each modelling year.
        _projection_methods (Dict[int, ProjectionMethods]): Projection method to use for each modelling year.
        _development_pattern (Dict[int, float]): Development factors for each modelling year.
        _a_priori (Dict[int, float]): A priori expected loss ratios for each modelling year.
        _data (ExperienceModelData): The claims and exposures data for the reinsurance contract.
        _layer_id (Any): The identifier for the reinsurance contract layer.
    """
    def __init__(self,
                model_data: ExperienceModelData, 
                layer_id: Any, 
                years_weighting: Optional[Dict[int, float]] = None, 
                projection_methods: Optional[Dict[int, ProjectionMethods]] = None,
                development_pattern: Optional[Dict[int, float]] = None,
                a_priori_assumption: Optional[Dict[int, float]] = None) -> None:
        """
        Initialize a BurnCostModel instance.

        Args:
            model_data (ExperienceModelData): The claims and exposures data for the reinsurance contract.
            layer_id (Any): The identifier for the reinsurance contract layer.
            years_weighting (Optional[Dict[int, float]], optional): Weighting factors for each modelling year.
                If None, equal weights of 1.0 will be used for all years. Defaults to None.
            projection_methods (Optional[Dict[int, ProjectionMethods]], optional): Projection method to use for each modelling year.
                If None, SIMPLE_CAPE_COD will be used for all years. Defaults to None.
            development_pattern (Optional[Dict[int, float]], optional): Development factors for each modelling year.
                If None, factors of 1.0 will be used for all years (no development). Defaults to None.
            a_priori_assumption (Optional[Dict[int, float]], optional): A priori expected loss ratios for each modelling year.
                If None, values of 0.0 will be used for all years (no a priori assumption). Defaults to None.
        """
        self._modelling_years = model_data.exposures.modelling_years
        self._years_weighting = years_weighting if years_weighting else {year: 1.0 for year in model_data.exposures.modelling_years}
        self._projection_methods = projection_methods if projection_methods else {year: ProjectionMethods.SIMPLE_CAPE_COD for year in model_data.exposures.modelling_years}  # Default to simple cape cod method
        self._development_pattern = development_pattern if development_pattern else {year: 1.0 for year in model_data.exposures.modelling_years}  # Default to no development pattern
        self._a_priori = a_priori_assumption if a_priori_assumption else {year: 0.0 for year in model_data.exposures.modelling_years}  # Default to no a priori assumption
        self._data = model_data
        self._layer_id = layer_id

    @property
    def modelling_years(self) -> List[int]:
        """
        Get the years to be used in the modelling.

        Returns:
            List[int]: The modelling years.
        """
        return self._modelling_years

    @modelling_years.setter
    def modelling_years(self, years: List[int]) -> None:
        """
        Set the years to be used in the modelling.

        Args:
            years (List[int]): The new modelling years.
        """
        self._modelling_years = years

    @property
    def years_weighting(self) -> Dict[int, float]:
        """
        Get the weighting factors for each modelling year.

        Returns:
            Dict[int, float]: Mapping of modelling year to weighting factor.
        """
        return self._years_weighting

    @years_weighting.setter
    def years_weighting(self, weighting: Dict[int, float]) -> None:
        """
        Set the weighting factors for each modelling year.

        Args:
            weighting (Dict[int, float]): Mapping of modelling year to weighting factor.
        """
        self._years_weighting = weighting

    @property
    def projection_methods(self) -> Dict[int, ProjectionMethods]:
        """
        Get the projection method to use for each modelling year.

        Returns:
            Dict[int, ProjectionMethods]: Mapping of modelling year to projection method.
        """
        return self._projection_methods

    @projection_methods.setter
    def projection_methods(self, methods: Dict[int, ProjectionMethods]) -> None:
        """
        Set the projection method to use for each modelling year.

        Args:
            methods (Dict[int, ProjectionMethods]): Mapping of modelling year to projection method.
        """
        self._projection_methods = methods

    @property
    def development_pattern(self) -> Dict[int, float]:
        """
        Get the development factors for each modelling year.

        Returns:
            Dict[int, float]: Mapping of modelling year to development factor.
        """
        return self._development_pattern

    @development_pattern.setter
    def development_pattern(self, pattern: Dict[int, float]) -> None:
        """
        Set the development factors for each modelling year.

        Args:
            pattern (Dict[int, float]): Mapping of modelling year to development factor.
        """
        self._development_pattern = pattern

    @property
    def data(self) -> ExperienceModelData:
        """
        Get the claims and exposures data for the reinsurance contract.

        Returns:
            ExperienceModelData: The claims and exposures data.
        """
        return self._data

    @data.setter
    def data(self, model_data: ExperienceModelData) -> None:
        """
        Set the claims and exposures data for the reinsurance contract.

        Args:
            model_data (ExperienceModelData): The new claims and exposures data.
        """
        self._data = model_data

    @property
    def layer_id(self) -> Any:
        """
        Get the identifier for the reinsurance contract layer.

        Returns:
            Any: The layer identifier.
        """
        return self._layer_id

    @layer_id.setter
    def layer_id(self, lid: Any) -> None:
        """
        Set the identifier for the reinsurance contract layer.

        Args:
            lid (Any): The new layer identifier.
        """
        self._layer_id = lid

    @property
    def a_priori(self) -> Dict[int, float]:
        """
        Get the a priori expected loss ratios for each modelling year.

        Returns:
            Dict[int, float]: Mapping of modelling year to a priori expected loss ratio.
        """
        return self._a_priori

    @a_priori.setter
    def a_priori(self, prior: Dict[int, float]) -> None:
        """
        Set the a priori expected loss ratios for each modelling year.

        Args:
            prior (Dict[int, float]): Mapping of modelling year to a priori expected loss ratio.
        """
        self._a_priori = prior

    def calculate_burn_cost(self) -> Dict[int, float]:
        """
        Calculate the burn cost for each modelling year.

        This method applies the specified projection method for each modelling year
        to calculate the burn cost based on the claims and exposures data.

        Returns:
            Dict[int, float]: Mapping of modelling year to calculated burn cost.
        """
        burn_costs = {}
        for year in self._modelling_years:
            # Skip years that don't have data in the aggregate claims or exposures
            if (self._layer_id not in self._data.aggregate_subject_contract_claims or
                year not in self._data.aggregate_subject_contract_claims[self._layer_id] or
                year not in self._data.aggregate_exposures):
                continue

            # Get claims and exposures data for the year
            claims_data = self._data.aggregate_subject_contract_claims[self._layer_id][year]
            exposures_data = self._data.aggregate_exposures[year]

            # Get the latest incurred claims amount
            latest_incurred = claims_data.get("latest_incurred", 0.0)

            # Get the earned exposure value
            earned_exposure = exposures_data.get("earned", 0.0)

            # Skip years with zero exposure to avoid division by zero
            if earned_exposure == 0.0:
                continue

            # Get the projection method, development factor, and a priori for the year
            method = self._projection_methods.get(year, ProjectionMethods.SIMPLE_CAPE_COD)
            dev_factor = self._development_pattern.get(year, 1.0)
            a_priori_value = self._a_priori.get(year, 0.0)

            # Apply the projection method
            projection_fn = projection_methods_fn.get(method)
            try:
                if method == ProjectionMethods.CHAINLADDER:
                    ultimate_claims = projection_fn(latest_incurred, dev_factor)
                elif method in [ProjectionMethods.SIMPLE_CAPE_COD, ProjectionMethods.GENERALISED_CAPE_COD]:
                    # Collect data for all years to calculate the Cape Cod prior
                    trend_factors = []
                    losses = []
                    dev_factors = []
                    exposures_list = []

                    for yr in self._modelling_years:
                        if (yr in self._data.aggregate_subject_contract_claims.get(self._layer_id, {}) and 
                            yr in self._data.aggregate_exposures):
                            yr_claims = self._data.aggregate_subject_contract_claims[self._layer_id][yr]
                            yr_exposures = self._data.aggregate_exposures[yr]

                            trend_factors.append(1.0)  # Default trend factor, could be replaced with actual trend factors
                            losses.append(yr_claims.get("latest_incurred", 0.0))
                            dev_factors.append(self._development_pattern.get(yr, 1.0))
                            exposures_list.append(yr_exposures.get("earned", 0.0))

                    decay_factor = 0.0  # Default decay factor, could be a parameter of the model

                    if method == ProjectionMethods.SIMPLE_CAPE_COD:
                        ultimate_claims = projection_fn(
                            latest_incurred, earned_exposure, dev_factor,
                            trend_factors, losses, dev_factors, exposures_list
                        )
                    else:  # GENERALISED_CAPE_COD
                        ultimate_claims = projection_fn(
                            latest_incurred, earned_exposure, dev_factor,
                            trend_factors, losses, dev_factors, exposures_list, decay_factor
                        )
                else:  # BF
                    ultimate_claims = projection_fn(latest_incurred, earned_exposure, dev_factor, a_priori_value)

                # Calculate burn cost as ultimate claims divided by earned exposure
                burn_costs[year] = ultimate_claims / earned_exposure if earned_exposure > 0 else 0.0

            except Exception as e:
                # If there's an error, use the chainladder method as fallback
                ultimate_claims = chainladder_method(latest_incurred, dev_factor)
                burn_costs[year] = ultimate_claims / earned_exposure if earned_exposure > 0 else 0.0

        return burn_costs

a_priori property writable

Get the a priori expected loss ratios for each modelling year.

Returns:

  Dict[int, float]: Mapping of modelling year to a priori expected loss ratio.

data property writable

Get the claims and exposures data for the reinsurance contract.

Returns:

  ExperienceModelData: The claims and exposures data.

development_pattern property writable

Get the development factors for each modelling year.

Returns:

  Dict[int, float]: Mapping of modelling year to development factor.

layer_id property writable

Get the identifier for the reinsurance contract layer.

Returns:

  Any: The layer identifier.

modelling_years property writable

Get the years to be used in the modelling.

Returns:

  List[int]: The modelling years.

projection_methods property writable

Get the projection method to use for each modelling year.

Returns:

  Dict[int, ProjectionMethods]: Mapping of modelling year to projection method.

years_weighting property writable

Get the weighting factors for each modelling year.

Returns:

  Dict[int, float]: Mapping of modelling year to weighting factor.

__init__(model_data, layer_id, years_weighting=None, projection_methods=None, development_pattern=None, a_priori_assumption=None)

Initialize a BurnCostModel instance.

Parameters:

  • model_data (ExperienceModelData, required): The claims and exposures data for the reinsurance contract.
  • layer_id (Any, required): The identifier for the reinsurance contract layer.
  • years_weighting (Optional[Dict[int, float]], default None): Weighting factors for each modelling year. If None, equal weights of 1.0 will be used for all years.
  • projection_methods (Optional[Dict[int, ProjectionMethods]], default None): Projection method to use for each modelling year. If None, SIMPLE_CAPE_COD will be used for all years.
  • development_pattern (Optional[Dict[int, float]], default None): Development factors for each modelling year. If None, factors of 1.0 will be used for all years (no development).
  • a_priori_assumption (Optional[Dict[int, float]], default None): A priori expected loss ratios for each modelling year. If None, values of 0.0 will be used for all years (no a priori assumption).
Source code in src\pyre\Models\Experience\burn_cost.py
def __init__(self,
            model_data: ExperienceModelData, 
            layer_id: Any, 
            years_weighting: Optional[Dict[int, float]] = None, 
            projection_methods: Optional[Dict[int, ProjectionMethods]] = None,
            development_pattern: Optional[Dict[int, float]] = None,
            a_priori_assumption: Optional[Dict[int, float]] = None) -> None:
    """
    Initialize a BurnCostModel instance.

    Args:
        model_data (ExperienceModelData): The claims and exposures data for the reinsurance contract.
        layer_id (Any): The identifier for the reinsurance contract layer.
        years_weighting (Optional[Dict[int, float]], optional): Weighting factors for each modelling year.
            If None, equal weights of 1.0 will be used for all years. Defaults to None.
        projection_methods (Optional[Dict[int, ProjectionMethods]], optional): Projection method to use for each modelling year.
            If None, SIMPLE_CAPE_COD will be used for all years. Defaults to None.
        development_pattern (Optional[Dict[int, float]], optional): Development factors for each modelling year.
            If None, factors of 1.0 will be used for all years (no development). Defaults to None.
        a_priori_assumption (Optional[Dict[int, float]], optional): A priori expected loss ratios for each modelling year.
            If None, values of 0.0 will be used for all years (no a priori assumption). Defaults to None.
    """
    self._modelling_years = model_data.exposures.modelling_years
    self._years_weighting = years_weighting if years_weighting else {year: 1.0 for year in model_data.exposures.modelling_years}
    self._projection_methods = projection_methods if projection_methods else {year: ProjectionMethods.SIMPLE_CAPE_COD for year in model_data.exposures.modelling_years}  # Default to simple cape cod method
    self._development_pattern = development_pattern if development_pattern else {year: 1.0 for year in model_data.exposures.modelling_years}  # Default to no development pattern
    self._a_priori = a_priori_assumption if a_priori_assumption else {year: 0.0 for year in model_data.exposures.modelling_years}  # Default to no a priori assumption
    self._data = model_data
    self._layer_id = layer_id

calculate_burn_cost()

Calculate the burn cost for each modelling year.

This method applies the specified projection method for each modelling year to calculate the burn cost based on the claims and exposures data.

Returns:

  Dict[int, float]: Mapping of modelling year to calculated burn cost.

Source code in src\pyre\Models\Experience\burn_cost.py
def calculate_burn_cost(self) -> Dict[int, float]:
    """
    Calculate the burn cost for each modelling year.

    This method applies the specified projection method for each modelling year
    to calculate the burn cost based on the claims and exposures data.

    Returns:
        Dict[int, float]: Mapping of modelling year to calculated burn cost.
    """
    burn_costs = {}
    for year in self._modelling_years:
        # Skip years that don't have data in the aggregate claims or exposures
        if (self._layer_id not in self._data.aggregate_subject_contract_claims or
            year not in self._data.aggregate_subject_contract_claims[self._layer_id] or
            year not in self._data.aggregate_exposures):
            continue

        # Get claims and exposures data for the year
        claims_data = self._data.aggregate_subject_contract_claims[self._layer_id][year]
        exposures_data = self._data.aggregate_exposures[year]

        # Get the latest incurred claims amount
        latest_incurred = claims_data.get("latest_incurred", 0.0)

        # Get the earned exposure value
        earned_exposure = exposures_data.get("earned", 0.0)

        # Skip years with zero exposure to avoid division by zero
        if earned_exposure == 0.0:
            continue

        # Get the projection method, development factor, and a priori for the year
        method = self._projection_methods.get(year, ProjectionMethods.SIMPLE_CAPE_COD)
        dev_factor = self._development_pattern.get(year, 1.0)
        a_priori_value = self._a_priori.get(year, 0.0)

        # Apply the projection method
        projection_fn = projection_methods_fn.get(method)
        try:
            if method == ProjectionMethods.CHAINLADDER:
                ultimate_claims = projection_fn(latest_incurred, dev_factor)
            elif method in [ProjectionMethods.SIMPLE_CAPE_COD, ProjectionMethods.GENERALISED_CAPE_COD]:
                # Collect data for all years to calculate the Cape Cod prior
                trend_factors = []
                losses = []
                dev_factors = []
                exposures_list = []

                for yr in self._modelling_years:
                    if (yr in self._data.aggregate_subject_contract_claims.get(self._layer_id, {}) and 
                        yr in self._data.aggregate_exposures):
                        yr_claims = self._data.aggregate_subject_contract_claims[self._layer_id][yr]
                        yr_exposures = self._data.aggregate_exposures[yr]

                        trend_factors.append(1.0)  # Default trend factor, could be replaced with actual trend factors
                        losses.append(yr_claims.get("latest_incurred", 0.0))
                        dev_factors.append(self._development_pattern.get(yr, 1.0))
                        exposures_list.append(yr_exposures.get("earned", 0.0))

                decay_factor = 0.0  # Default decay factor, could be a parameter of the model

                if method == ProjectionMethods.SIMPLE_CAPE_COD:
                    ultimate_claims = projection_fn(
                        latest_incurred, earned_exposure, dev_factor,
                        trend_factors, losses, dev_factors, exposures_list
                    )
                else:  # GENERALISED_CAPE_COD
                    ultimate_claims = projection_fn(
                        latest_incurred, earned_exposure, dev_factor,
                        trend_factors, losses, dev_factors, exposures_list, decay_factor
                    )
            else:  # BF
                ultimate_claims = projection_fn(latest_incurred, earned_exposure, dev_factor, a_priori_value)

            # Calculate burn cost as ultimate claims divided by earned exposure
            burn_costs[year] = ultimate_claims / earned_exposure if earned_exposure > 0 else 0.0

        except Exception as e:
            # If there's an error, use the chainladder method as fallback
            ultimate_claims = chainladder_method(latest_incurred, dev_factor)
            burn_costs[year] = ultimate_claims / earned_exposure if earned_exposure > 0 else 0.0

    return burn_costs

ProjectionMethods

Bases: Enum

Enumeration of available projection methods for burn cost calculations.

Source code in src\pyre\Models\Experience\burn_cost.py
class ProjectionMethods(Enum):
    """
    Enumeration of available projection methods for burn cost calculations.
    """
    CHAINLADDER = auto()
    BF = auto()
    SIMPLE_CAPE_COD = auto()
    GENERALISED_CAPE_COD = auto()

bf_method(data, exposure, development_factor, a_priori)

Apply the Bornhuetter-Ferguson method to project ultimate claims.

Parameters:

  • data (float, required): The current claim amount.
  • exposure (float, required): The exposure amount.
  • development_factor (float, required): The development factor to apply.
  • a_priori (float, required): The a priori expected loss ratio.

Returns:

  float: The projected ultimate claim amount.

Source code in src\pyre\Models\Experience\burn_cost.py
def bf_method(data: float, exposure: float, development_factor: float, a_priori: float) -> float:
    """
    Apply the Bornhuetter-Ferguson method to project ultimate claims.

    Args:
        data (float): The current claim amount.
        exposure (float): The exposure amount.
        development_factor (float): The development factor to apply.
        a_priori (float): The a priori expected loss ratio.

    Returns:
        float: The projected ultimate claim amount.
    """
    return data + (1 - (1/development_factor)) * a_priori * exposure
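
As a minimal worked example with illustrative numbers: with 600 of incurred claims, exposure of 1,000, a development factor of 1.25, and an 80% a priori loss ratio, the unreported share is 1 - 1/1.25 = 0.2, so the BF ultimate is 600 + 0.2 * 0.8 * 1,000 = 760:

from pyre.Models.Experience.burn_cost import bf_method

# ultimate = 600 + (1 - 1/1.25) * 0.8 * 1000 = 760
ultimate = bf_method(data=600.0, exposure=1000.0, development_factor=1.25, a_priori=0.8)
print(ultimate)  # 760.0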

cape_cod_method(data, exposure, development_factor, trend_factors=None, losses=None, development_factors=None, exposures=None)

Apply the Cape Cod method to project ultimate claims.

Parameters:

  • data (float, required): The current claim amount.
  • exposure (float, required): The exposure amount.
  • development_factor (float, required): The development factor to apply.
  • trend_factors (List[float], default None): List of trend factors for each year.
  • losses (List[float], default None): List of losses for each year.
  • development_factors (List[float], default None): List of development factors for each year.
  • exposures (List[float], default None): List of exposures for each year.

Returns:

  float: The projected ultimate claim amount.

Raises:

  ValueError: If any of the required parameters for cape_cod_prior_algo are missing.

Source code in src\pyre\Models\Experience\burn_cost.py
def cape_cod_method(data: float, exposure: float, development_factor: float, 
                    trend_factors: List[float] = None, losses: List[float] = None, 
                    development_factors: List[float] = None, exposures: List[float] = None) -> float:
    """
    Apply the Cape Cod method to project ultimate claims.

    Args:
        data (float): The current claim amount.
        exposure (float): The exposure amount.
        development_factor (float): The development factor to apply.
        trend_factors (List[float], optional): List of trend factors for each year.
        losses (List[float], optional): List of losses for each year.
        development_factors (List[float], optional): List of development factors for each year.
        exposures (List[float], optional): List of exposures for each year.

    Returns:
        float: The projected ultimate claim amount.

    Raises:
        ValueError: If any of the required parameters for cape_cod_prior_algo are missing.
    """
    # If any of the required parameters for cape_cod_prior_algo are missing, raise an error
    if not all([trend_factors, losses, development_factors, exposures]):
        raise ValueError("Cape Cod method requires trend_factors, losses, development_factors, and exposures")

    cape_cod_prior = cape_cod_prior_algo(
        trend_factors=trend_factors,
        losses=losses,
        development_factors=development_factors,
        exposures=exposures,
        generalised=False
    )

    return bf_method(data, exposure, development_factor, a_priori=cape_cod_prior)

cape_cod_prior_algo(trend_factors, losses, development_factors, exposures, decay_factor=0.0, generalised=False)

Calculate the a priori expected loss ratio using the Cape Cod algorithm.

Parameters:

  • trend_factors (List[float], required): List of trend factors for each year.
  • losses (List[float], required): List of losses for each year.
  • development_factors (List[float], required): List of development factors for each year.
  • exposures (List[float], required): List of exposures for each year.
  • decay_factor (float, default 0.0): Decay factor for the generalised method.
  • generalised (bool, default False): Whether to use the generalised method.

Returns:

  Union[Any, float]: The a priori expected loss ratio.

Source code in src\pyre\Models\Experience\burn_cost.py
def cape_cod_prior_algo(trend_factors: List[float], losses: List[float], development_factors: List[float],
                        exposures: List[float], decay_factor: float = 0.0, generalised: bool = False) -> Union[
    Any, float]:
    """
    Calculate the a priori expected loss ratio using the Cape Cod algorithm.

    Args:
        trend_factors (List[float]): List of trend factors for each year.
        losses (List[float]): List of losses for each year.
        development_factors (List[float]): List of development factors for each year.
        exposures (List[float]): List of exposures for each year.
        decay_factor (float, optional): Decay factor for the generalised method. Defaults to 0.0.
        generalised (bool, optional): Whether to use the generalised method. Defaults to False.

    Returns:
        Union[Any, float]: The a priori expected loss ratio.
    """
    if generalised:
        # For the generalised method, apply a decay factor to give different weights to different years
        weights = [exp(-decay_factor * i) for i in range(len(trend_factors))]

        # Calculate weighted pseudo claims and exposures
        pseudo_claims = sum(weights[i] * trend_factors[i] * losses[i] * (development_factors[i] / exposures[i])
                            for i in range(len(trend_factors)))
        pseudo_exposures = sum(weights[i] * exposures[i] / development_factors[i]
                               for i in range(len(exposures)))

        return pseudo_claims / pseudo_exposures
    else:
        # Standard Cape Cod method: all years equally weighted
        pseudo_claims = sum(trend_factors[i] * losses[i] * (development_factors[i] / exposures[i])
                            for i in range(len(trend_factors)))
        pseudo_exposures = sum(exposures[i] / development_factors[i]
                               for i in range(len(exposures)))
        return pseudo_claims / pseudo_exposures
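
A minimal usage sketch with illustrative numbers, pooling three years of trended losses and development-adjusted exposures into a single a priori loss ratio:

from pyre.Models.Experience.burn_cost import cape_cod_prior_algo

# Three modelling years of illustrative inputs
prior = cape_cod_prior_algo(
    trend_factors=[1.10, 1.05, 1.00],
    losses=[500.0, 550.0, 600.0],
    development_factors=[1.50, 1.25, 1.10],
    exposures=[1000.0, 1000.0, 1000.0],
)
print(f"Cape Cod a priori loss ratio: {prior:.4f}")

# Setting generalised=True with a decay_factor > 0 gives geometrically
# decreasing weight to later entries in the lists.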

chainladder_method(data, development_factor)

Apply the Chain Ladder method to project ultimate claims.

Parameters:

  • data (float, required): The current claim amount.
  • development_factor (float, required): The development factor to apply.

Returns:

  float: The projected ultimate claim amount.

Source code in src\pyre\Models\Experience\burn_cost.py
def chainladder_method(data: float, development_factor: float) -> float:
    """
    Apply the Chain Ladder method to project ultimate claims.

    Args:
        data (float): The current claim amount.
        development_factor (float): The development factor to apply.

    Returns:
        float: The projected ultimate claim amount.
    """
    return data * development_factor
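
For instance, 600 of incurred claims developed to ultimate with a factor of 1.25:

from pyre.Models.Experience.burn_cost import chainladder_method

ultimate = chainladder_method(data=600.0, development_factor=1.25)
print(ultimate)  # 750.0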

generalised_cape_cod_method(data, exposure, development_factor, trend_factors=None, losses=None, development_factors=None, exposures=None, decay_factor=0.0)

Apply the Generalised Cape Cod method to project ultimate claims.

Parameters:

  • data (float, required): The current claim amount.
  • exposure (float, required): The exposure amount.
  • development_factor (float, required): The development factor to apply.
  • trend_factors (List[float], default None): List of trend factors for each year.
  • losses (List[float], default None): List of losses for each year.
  • development_factors (List[float], default None): List of development factors for each year.
  • exposures (List[float], default None): List of exposures for each year.
  • decay_factor (float, default 0.0): Decay factor for the generalised method.

Returns:

  float: The projected ultimate claim amount.

Raises:

  ValueError: If any of the required parameters for cape_cod_prior_algo are missing.

Source code in src\pyre\Models\Experience\burn_cost.py
def generalised_cape_cod_method(data: float, exposure: float, development_factor: float,
                               trend_factors: List[float] = None, losses: List[float] = None,
                               development_factors: List[float] = None, exposures: List[float] = None,
                               decay_factor: float = 0.0) -> float:
    """
    Apply the Generalised Cape Cod method to project ultimate claims.

    Args:
        data (float): The current claim amount.
        exposure (float): The exposure amount.
        development_factor (float): The development factor to apply.
        trend_factors (List[float], optional): List of trend factors for each year.
        losses (List[float], optional): List of losses for each year.
        development_factors (List[float], optional): List of development factors for each year.
        exposures (List[float], optional): List of exposures for each year.
        decay_factor (float, optional): Decay factor for the generalised method. Defaults to 0.0.

    Returns:
        float: The projected ultimate claim amount.

    Raises:
        ValueError: If any of the required parameters for cape_cod_prior_algo are missing.
    """
    # If any of the required parameters for cape_cod_prior_algo are missing, raise an error
    if not all([trend_factors, losses, development_factors, exposures]):
        raise ValueError("Generalised Cape Cod method requires trend_factors, losses, development_factors, and exposures")

    generalised_cape_cod_prior = cape_cod_prior_algo(
        trend_factors=trend_factors,
        losses=losses,
        development_factors=development_factors,
        exposures=exposures,
        decay_factor=decay_factor,
        generalised=True
    )

    return bf_method(data, exposure, development_factor, a_priori=generalised_cape_cod_prior)

assess_error_assumptions(actual, expected, num_parameters)

Assesses the error term based on standardized residuals and calculates:

  • The proportion of positive standardized residuals.
  • The proportion of standardized residuals outside the range (-2, 2).

Parameters:

  • actual (List[float], required): The actual observed values.
  • expected (List[float], required): The expected values from the model.
  • num_parameters (int, required): The number of parameters in the model.

Returns:

  dict: A dictionary containing:
    • 'proportion_positive': Proportion of positive standardized residuals.
    • 'proportion_outside_range': Proportion of standardized residuals outside (-2, 2).
    • 'mean_residual': Mean of the standardized residuals.
    • 'std_residual': Standard deviation of the standardized residuals.

Source code in src\pyre\Models\Experience\curve_fitting.py
def assess_error_assumptions(actual: List[float], expected: List[float], num_parameters: int) -> dict:
    """
    Assesses the error term based on standardized residuals and calculates:
    - The proportion of positive standardized residuals.
    - The proportion of standardized residuals outside the range (-2, 2).

    Args:
        actual (List[float]): The actual observed values.
        expected (List[float]): The expected values from the model.
        num_parameters (int): The number of parameters in the model.

    Returns:
        dict: A dictionary containing:
            - 'proportion_positive': Proportion of positive standardized residuals.
            - 'proportion_outside_range': Proportion of standardized residuals outside (-2, 2).
            - 'mean_residual': Mean of the standardized residuals.
            - 'std_residual': Standard deviation of the standardized residuals.
    """
    # Calculate standardized residuals
    residuals = residuals_standardised(actual, expected, num_parameters)

    # Proportion of positive standardized residuals
    proportion_positive = sum(1 for r in residuals if r > 0) / len(residuals)

    # Proportion of standardized residuals outside the range (-2, 2)
    proportion_outside_range = sum(1 for r in residuals if r < -2 or r > 2) / len(residuals)

    # Mean and standard deviation of standardized residuals
    mean_residual = sum(residuals) / len(residuals)
    std_residual = sqrt(sum((r - mean_residual) ** 2 for r in residuals) / len(residuals))

    return {
        "proportion_positive": proportion_positive,
        "proportion_outside_range": proportion_outside_range,
        "mean_residual": mean_residual,
        "std_residual": std_residual,
    }
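
A minimal usage sketch with illustrative actual and fitted values from a 2-parameter curve:

from pyre.Models.Experience.curve_fitting import assess_error_assumptions

actual = [1.42, 1.19, 1.11, 1.04, 1.02]
expected = [1.40, 1.21, 1.10, 1.05, 1.02]

diagnostics = assess_error_assumptions(actual, expected, num_parameters=2)

# If the error assumptions hold, roughly half the standardized residuals
# should be positive and few should fall outside (-2, 2)
print(diagnostics["proportion_positive"])
print(diagnostics["proportion_outside_range"])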

exponential_fit(age_to_age_factors, time_periods)

Fits an exponential curve to the given incremental age-to-age factors using the model: rj = 1 + exp(a + b * t), i.e. ln(rj - 1) = a + b * t.

Parameters:

  • age_to_age_factors (List[float], required): The incremental age-to-age factors (rj), each greater than 1.
  • time_periods (List[float], required): The corresponding time periods (t).

Returns:

  Tuple[float, float]: The parameters (a, b) of the exponential curve.

Source code in src\pyre\Models\Experience\curve_fitting.py
def exponential_fit(age_to_age_factors: List[float], time_periods: List[float]) -> Tuple[float,float]:
    """
    Fits an exponential curve to the given incremental age-to-age factors using the model:
    rj = 1 + exp(a + b * t), i.e. ln(rj - 1) = a + b * t.

    Args:
        age_to_age_factors (List[float]): The incremental age-to-age factors (rj), each greater than 1.
        time_periods (List[float]): The corresponding time periods (t).

    Returns:
        Tuple[float, float]: The parameters (a, b) of the exponential curve.
    """
    ln_rj = [log(rj - 1) for rj in age_to_age_factors]
    b, a = linear_regression(time_periods, ln_rj)
    return (a, b)
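
A minimal sketch fitting the curve to illustrative factors (each must exceed 1.0, since the fit takes log(rj - 1)) and extrapolating the next factor:

from pyre.Models.Experience.curve_fitting import exponential_fit
from math import exp

factors = [1.40, 1.20, 1.10, 1.05]   # illustrative age-to-age factors
periods = [1.0, 2.0, 3.0, 4.0]

a, b = exponential_fit(factors, periods)

# Extrapolate the period-5 factor from the fitted curve: rj = 1 + exp(a + b*t)
r5 = 1.0 + exp(a + b * 5.0)
print(f"Fitted a={a:.4f}, b={b:.4f}, extrapolated r5={r5:.4f}")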

inverse_power_fit(age_to_age_factors, time_periods, c_values)

Fits a Sherman Curve (Inverse Power Curve) to the given incremental age-to-age factors using the model: rj = 1 + a * (t + c)^b, i.e. ln(rj - 1) = ln(a) + b * ln(t + c).

Parameters:

  • age_to_age_factors (List[float], required): The incremental age-to-age factors (rj), each greater than 1.
  • time_periods (List[float], required): The corresponding time periods (t).
  • c_values (List[float], required): A list of candidate values for c to test.

Returns:

  Tuple[float, float, float]: The parameters (a, b, c) of the Sherman Curve.

Source code in src\pyre\Models\Experience\curve_fitting.py
def inverse_power_fit(age_to_age_factors: List[float], time_periods: List[float], c_values: List[float]) -> Tuple[float, float, float]:
    """
    Fits a Sherman Curve (Inverse Power Curve) to the given incremental age-to-age factors using the model:
    rj = 1 + a * (t + c)^b, i.e. ln(rj - 1) = ln(a) + b * ln(t + c).

    Args:
        age_to_age_factors (List[float]): The incremental age-to-age factors (rj), each greater than 1.
        time_periods (List[float]): The corresponding time periods (t).
        c_values (List[float]): A list of candidate values for c to test.

    Returns:
        Tuple[float, float, float]: The parameters (a, b, c) of the Sherman Curve.
    """
    def calculate_standard_error(a: float, b: float, c: float) -> float:
        # Predicted factors from the fitted curve: rj = 1 + a * (t + c)^b
        predicted_rj = [1 + a * ((t + c) ** b) for t in time_periods]
        errors = [(rj - pred_rj) ** 2 for rj, pred_rj in zip(age_to_age_factors, predicted_rj)]
        return sqrt(sum(errors) / len(errors))

    best_a, best_b, best_c = 0.0, 0.0, 0.0
    min_standard_error = float('inf')

    for c in c_values:
        ln_rj = [log(rj - 1) for rj in age_to_age_factors]
        ln_t_plus_c = [log(t + c) for t in time_periods]
        b, ln_a = linear_regression(ln_t_plus_c, ln_rj)
        a = exp(ln_a)
        standard_error = calculate_standard_error(a, b, c)

        if standard_error < min_standard_error:
            min_standard_error = standard_error
            best_a, best_b, best_c = a, b, c

    return (best_a, best_b, best_c)
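
A minimal sketch searching an illustrative grid of c values:

from pyre.Models.Experience.curve_fitting import inverse_power_fit

factors = [1.40, 1.20, 1.10, 1.05]   # illustrative age-to-age factors
periods = [1.0, 2.0, 3.0, 4.0]
c_grid = [0.0, 0.5, 1.0, 1.5, 2.0]   # candidate shift parameters

a, b, c = inverse_power_fit(factors, periods, c_grid)
print(f"Best fit: a={a:.4f}, b={b:.4f}, c={c:.1f}")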

linear_regression(x, y)

Performs linear regression to calculate the slope and intercept.

Parameters:

  • x (List[float], required): The independent variable values.
  • y (List[float], required): The dependent variable values.

Returns:

  Tuple[float, float]: The slope and intercept of the regression line.

Source code in src\pyre\Models\Experience\curve_fitting.py
def linear_regression(x: List[float], y: List[float]) -> Tuple[float, float]:
    """
    Performs linear regression to calculate the slope and intercept.

    Args:
        x (List[float]): The independent variable values.
        y (List[float]): The dependent variable values.

    Returns:
        Tuple[float, float]: The slope and intercept of the regression line.
    """
    mean_x = sum(x) / len(x)
    mean_y = sum(y) / len(y)

    numerator = sum((xi - mean_x) * (yi - mean_y) for xi, yi in zip(x, y))
    denominator = sum((xi - mean_x) ** 2 for xi in x)

    slope = numerator / denominator
    intercept = mean_y - slope * mean_x

    return (slope, intercept)
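
A minimal sketch on three illustrative points:

from pyre.Models.Experience.curve_fitting import linear_regression

slope, intercept = linear_regression([1.0, 2.0, 3.0], [2.1, 3.9, 6.2])
print(f"slope={slope:.4f}, intercept={intercept:.4f}")  # slope=2.05, intercept close to -0.03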

power_fit(age_to_age_factors, time_periods)

Fits a power curve to the given cumulative age-to-age factors using the model: Rj = a^(b^t), so that ln(ln(Rj)) = ln(ln(a)) + (ln(b) * t).

Parameters:

  • age_to_age_factors (List[float], required): The cumulative age-to-age factors (Rj), each greater than 1.
  • time_periods (List[float], required): The corresponding time periods (t).

Returns:

  Tuple[float, float]: The parameters (a, b) of the power curve.

Source code in src\pyre\Models\Experience\curve_fitting.py
def power_fit(age_to_age_factors: List[float], time_periods: List[float]) -> Tuple[float,float]:
    """
    Fits a power curve to the given cumulative age-to-age factors using the model:
    Rj = a^(b^t), so that ln(ln(Rj)) = ln(ln(a)) + (ln(b) * t).

    Args:
        age_to_age_factors (List[float]): The cumulative age-to-age factors (Rj), each greater than 1.
        time_periods (List[float]): The corresponding time periods (t).

    Returns:
        Tuple[float, float]: The parameters (a, b) of the power curve.
    """
    ln_ln_Rj = [log(log(Rj)) for Rj in age_to_age_factors]
    ln_b, ln_ln_a = linear_regression(time_periods, ln_ln_Rj)
    a = exp(exp(ln_ln_a))
    b = exp(ln_b)
    return (a, b)
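
A minimal sketch on illustrative cumulative factors (each must exceed 1.0, since the fit takes log(log(Rj))):

from pyre.Models.Experience.curve_fitting import power_fit

cum_factors = [1.80, 1.35, 1.15, 1.06]   # illustrative factors to ultimate
periods = [1.0, 2.0, 3.0, 4.0]

a, b = power_fit(cum_factors, periods)

# Extrapolate the period-5 cumulative factor from the fitted curve: Rj = a^(b^t)
r5 = a ** (b ** 5.0)
print(f"a={a:.4f}, b={b:.4f}, extrapolated R5={r5:.4f}")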

r_squared(actual, expected)

Calculates the R-squared (coefficient of determination) value.

Parameters:

  • actual (List[float], required): The actual observed values.
  • expected (List[float], required): The expected values from the model.

Returns:

  float: The R-squared value.

Source code in src\pyre\Models\Experience\curve_fitting.py
def r_squared(actual: List[float], expected: List[float]) -> float:
    """
    Calculates the R-squared (coefficient of determination) value.

    Args:
        actual (List[float]): The actual observed values.
        expected (List[float]): The expected values from the model.

    Returns:
        float: The R-squared value.
    """
    # Calculate the total sum of squares (TSS)
    mean_actual = sum(actual) / len(actual)
    total_sum_of_squares = sum((a - mean_actual) ** 2 for a in actual)

    # Calculate the residual sum of squares (RSS)
    residual_sum_of_squares = sum((a - e) ** 2 for a, e in zip(actual, expected))

    # Calculate R-squared
    r_squared_value = 1 - (residual_sum_of_squares / total_sum_of_squares)

    return r_squared_value
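
A minimal sketch comparing observed factors with fitted values:

from pyre.Models.Experience.curve_fitting import r_squared

actual = [1.42, 1.19, 1.11, 1.04]
expected = [1.40, 1.21, 1.10, 1.05]
print(f"R-squared: {r_squared(actual, expected):.4f}")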

weibull_fit(age_to_age_factors, time_periods)

Fits a Weibull curve to the given cumulative age-to-age factors using the model: Rj = 1 / (1 - exp(-a * t^b)).

Parameters:

  • age_to_age_factors (List[float], required): The cumulative age-to-age factors (Rj), each greater than 1.
  • time_periods (List[float], required): The corresponding time periods (t).

Returns:

  Tuple[float, float]: The parameters (a, b) of the Weibull curve.

Source code in src\pyre\Models\Experience\curve_fitting.py
def weibull_fit(age_to_age_factors: List[float], time_periods: List[float]) -> Tuple[float,float]:
    """
    Fits a Weibull curve to the given cumulative age-to-age factors using the model:
    Rj = 1 / (1 - exp(-a * t^b)).

    Args:
        age_to_age_factors (List[float]): The cumulative age-to-age factors (Rj), each greater than 1.
        time_periods (List[float]): The corresponding time periods (t).

    Returns:
        Tuple[float, float]: The parameters (a, b) of the Weibull curve.
    """
    transformed_Rj = [log(-log(1 - 1 / Rj)) for Rj in age_to_age_factors]
    ln_t = [log(t) for t in time_periods]
    b, ln_a = linear_regression(ln_t, transformed_Rj)
    a = exp(ln_a)
    return (a, b)
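
A minimal sketch on illustrative cumulative factors:

from pyre.Models.Experience.curve_fitting import weibull_fit
from math import exp

cum_factors = [1.80, 1.35, 1.15, 1.06]   # illustrative factors to ultimate
periods = [1.0, 2.0, 3.0, 4.0]

a, b = weibull_fit(cum_factors, periods)

# Extrapolate the period-5 cumulative factor: Rj = 1 / (1 - exp(-a * t^b))
r5 = 1.0 / (1.0 - exp(-a * 5.0 ** b))
print(f"a={a:.4f}, b={b:.4f}, extrapolated R5={r5:.4f}")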

severity_fit

Source code in src\pyre\Models\Experience\frequency_severity.py
class severity_fit:
    def __init__(self, distributions: List[SeverityDistribution], data : ExperienceModelData, ibner_dev_pattern: Dict[int, float], ground_up:bool = True):
        self.data = data
        self.ground_up_model = ground_up
        self.ibner_dev_pattern = ibner_dev_pattern # could pass IBNERPatternExtractor().getIBNERPATTERN as outputs Dict[int,float] if needed
        self.distributions = distributions

    @property
    def _inidividual_projected_claims(self) -> List[float]:
        if self.ground_up_model:
            # Project each trended claim to ultimate using the IBNER development
            # factor for its modelling year
            return [claim.capped_claim_development_history.latest_incurred
                    * self.ibner_dev_pattern[claim.claims_meta_data.modelling_year]
                    for claim in self.data.trended_claims.claims]
        else:
            raise NotImplementedError("non ground-up fitting hasn't been implemented yet")


    def fit(self):
        """
        Fit the severity distribution to the data.
        This method should implement the logic to fit the specified severity distributions
        to the data provided in the ExperienceModelData instance.
        """
        self._inidividual_projected_claims 
        pass

fit()

Fit the severity distribution to the data. This method should implement the logic to fit the specified severity distributions to the data provided in the ExperienceModelData instance.

Source code in src\pyre\Models\Experience\frequency_severity.py
def fit(self):
    """
    Fit the severity distribution to the data.
    This method should implement the logic to fit the specified severity distributions
    to the data provided in the ExperienceModelData instance.
    """
    self._inidividual_projected_claims 
    pass

resampling

A class for resampling claims data from an ExperienceModelData object.

This class provides functionality to randomly select claims from a set of trended claims for the purpose of simulation or bootstrapping analysis. It uses the random.choice function to select individual claims from the trended claims data.

Attributes:

  • _claims: A collection of trended claims from the ExperienceModelData object. These are claims that have been adjusted to the contract inception year.

Note

Future enhancements planned include support for different return periods and more sophisticated resampling methods. The implementation relies on the Claims class having an iterator method to work with the random.choice function.

Source code in src\pyre\Models\Experience\resampling.py
class resampling():
    """
    A class for resampling claims data from an ExperienceModelData object.

    This class provides functionality to randomly select claims from a set of trended claims
    for the purpose of simulation or bootstrapping analysis. It uses the random.choice function
    to select individual claims from the trended claims data.

    Attributes:
        _claims: A collection of trended claims from the ExperienceModelData object.
                These are claims that have been adjusted to the contract inception year.

    Note:
        Future enhancements planned include support for different return periods and
        more sophisticated resampling methods. The implementation relies on the Claims
        class having an iterator method to work with the random.choice function.
    """
    def __init__(self, claims: ExperienceModelData) -> None:
        """
        Initialize the resampling class with claims data.

        Parameters:
            claims (ExperienceModelData): An ExperienceModelData object containing the claims
                                         to be resampled. The trended_claims property of this
                                         object will be used for resampling.
        """
        self._claims = claims.trended_claims  # trended losses prior to subject losses


    def resample(self) -> None:
        """
        Randomly select a claim from the collection of trended claims.

        This method uses the random.choice function to select a single claim
        from the trended claims data. Currently, it doesn't return the selected
        claim but this behavior may change in future implementations.

        Returns:
            None: Currently doesn't return anything, but may be updated to return
                 the selected claim in future implementations.

        Note:
            Future enhancements will include support for different return periods
            and more sophisticated resampling methods.
        """
        choice(self._claims)
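
A minimal usage sketch, assuming an ExperienceModelData instance called 'model_data':

from pyre.Models.Experience.resampling import resampling

sampler = resampling(model_data)
sampler.resample()  # draws one claim from the trended claims (currently returns None)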

__init__(claims)

Initialize the resampling class with claims data.

Parameters:

Name Type Description Default
claims ExperienceModelData

An ExperienceModelData object containing the claims to be resampled. The trended_claims property of this object will be used for resampling.

required
Source code in src\pyre\Models\Experience\resampling.py
def __init__(self, claims: ExperienceModelData) -> None:
    """
    Initialize the resampling class with claims data.

    Parameters:
        claims (ExperienceModelData): An ExperienceModelData object containing the claims
                                     to be resampled. The trended_claims property of this
                                     object will be used for resampling.
    """
    self._claims = claims.trended_claims  # trended losses prior to subject losses

resample()

Randomly select a claim from the collection of trended claims.

This method uses the random.choice function to select a single claim from the trended claims data. Currently, it doesn't return the selected claim but this behavior may change in future implementations.

Returns:

Name Type Description
None None

Currently doesn't return anything, but may be updated to return the selected claim in future implementations.

Note

Future enhancements will include support for different return periods and more sophisticated resampling methods.

Source code in src\pyre\Models\Experience\resampling.py
def resample(self) -> None:
    """
    Randomly select a claim from the collection of trended claims.

    This method uses the random.choice function to select a single claim
    from the trended claims data. Currently, it doesn't return the selected
    claim but this behavior may change in future implementations.

    Returns:
        None: Currently doesn't return anything, but may be updated to return
             the selected claim in future implementations.

    Note:
        Future enhancements will include support for different return periods
        and more sophisticated resampling methods.
    """
    choice(self._claims)

swissRe_c_values

Bases: Enum

c-values for the Swiss Re exposure curves, by line of business. Reference: https://www.swissre.com/dam/jcr:7137dac0-83a6-4cfa-80a4-93d33c35562f/exposure-rating-brochure.pdf

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
class swissRe_c_values(Enum):
    """_summary_
    https://www.swissre.com/dam/jcr:7137dac0-83a6-4cfa-80a4-93d33c35562f/exposure-rating-brochure.pdf
    """
    PERSONAL_LINES = 1.5
    COMMERCIAL_LINES_SMALL = 2.0
    COMMERCIAL_LINES_MEDIUM = 3.0
    CAPTIVE_BI = 2.1
    CAPTIVE_PD = 3.8
    CAPTIVE_BI_PD = 3.4
    INDUSTRIAL_LARGE_COMMERCIAL = 4.0
    LLOYDS_INDUSTRY = 5.0

calculate_curve(curve_type, parameters, position)

Calculate curve value based on curve type and parameters.

Parameters:

Name Type Description Default
curve_type ExposureCurveType

Type of curve to use

required
parameters Dict[str, Any]

Dictionary containing curve-specific parameters

required
position float

Position on the curve

required

Returns:

Name Type Description
float float

Calculated curve value

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
def calculate_curve(curve_type: ExposureCurveType, parameters: Dict[str, Any], position: float) -> float:
    """Calculate curve value based on curve type and parameters.

    Args:
        curve_type: Type of curve to use
        parameters: Dictionary containing curve-specific parameters
        position: Position on the curve

    Returns:
        float: Calculated curve value
    """
    if curve_type not in exposure_curve_calculation:
        raise ValueError(f"Unsupported curve type: {curve_type}")

    func = exposure_curve_calculation[curve_type]
    return func(**parameters, curve_position=position)
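
A minimal dispatch sketch. The member name ExposureCurveType.MBBEFD is an assumption, since the enum's members are not listed on this page:

from pyre.Models.Exposure.exposure_curve_functions import (
    ExposureCurveType,
    calculate_curve,
    swissRe_c_values,
)

# Parameters are passed as a dict keyed by the target curve function's argument names.
value = calculate_curve(
    ExposureCurveType.MBBEFD,                     # assumed member name
    {"curve": swissRe_c_values.PERSONAL_LINES},
    0.5,                                          # position on the curve
)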

mbbefd_curve(curve, curve_position)

Calculate the MBBEFD curve value.

Parameters:

Name Type Description Default
curve Union[swissRe_c_values, float]

Either a swissRe_c_values enum or a manual c-value as float

required
curve_position float

Position on the curve

required

Returns:

Name Type Description
float float

The calculated curve value

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
def mbbefd_curve(curve: Union[swissRe_c_values, float], curve_position: float) -> float:
    """Calculate the MBBEFD curve value.

    Args:
        curve (Union[swissRe_c_values, float]): Either a swissRe_c_values enum or a manual c-value as float
        curve_position (float): Position on the curve

    Returns:
        float: The calculated curve value
    """
    # Extract the c-value - either from enum or use the float directly
    c_value = curve.value if isinstance(curve, swissRe_c_values) else curve

    # Calculate using the c-value
    b = exp(3.1 - 0.15 * (1 + c_value) * c_value)
    g = exp((0.78 + 0.12 * c_value) * c_value)
    return log(((g - 1) * b + (1 - b * g) * b ** curve_position) / (1 - b)) / log(b * g)
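
A minimal sketch using the Swiss Re c-values above (the import path follows the module layout):

from pyre.Models.Exposure.exposure_curve_functions import mbbefd_curve, swissRe_c_values

# Expected share of ground-up loss below 40% of the sum insured
share = mbbefd_curve(swissRe_c_values.INDUSTRIAL_LARGE_COMMERCIAL, 0.4)

# A manual c-value can be passed directly instead of the enum
share_manual = mbbefd_curve(3.5, 0.4)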

mixed_exponential_curve(paramaters_mus, parameter_weights, curve_position_value)

Calculates the limited expected severity of a mixed exponential distribution at a given position.

Parameters:

Name Type Description Default
paramaters_mus list[float]

The means (mu) of the exponential components.

required
parameter_weights list[float]

The mixing weights applied to each exponential component.

required
curve_position_value float

The limit at which the limited expected severity is evaluated.

required

Returns:

Name Type Description
float float

The weighted limited expected severity at the given position.

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
def mixed_exponential_curve(paramaters_mus:list[float], parameter_weights:list[float], curve_position_value:float) -> float:
    """_summary_

    Args:
        paramaters_mus (list[float]): _description_
        parameter_weights (list[float]): _description_
        curve_position_value (float): _description_

    Returns:
        float: _description_
    """
    total_limited_severity = 0
    for mu, weight in zip(paramaters_mus, parameter_weights):
        if mu != 0:
            contributing_limiting_severity = (1 - exp((-1 / mu) * curve_position_value)) * mu
            total_limited_severity += contributing_limiting_severity * weight
    return total_limited_severity
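
A minimal sketch with illustrative parameters (two exponential components):

from pyre.Models.Exposure.exposure_curve_functions import mixed_exponential_curve

mus = [1_000.0, 10_000.0]  # component means
weights = [0.7, 0.3]       # mixing weights (sum to 1)

# Limited expected severity at a 5,000 limit:
# sum_i weight_i * mu_i * (1 - exp(-limit / mu_i))
les = mixed_exponential_curve(mus, weights, 5_000.0)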

riebesell_curve(attachment, limit, z_value, base_limit)

Calculates the Riebesell increased limit factor for a layer, under the rule that premium scales by (1 + z_value) each time the limit doubles.

Parameters:

Name Type Description Default
attachment float

The attachment point of the layer.

required
limit float

The width of the layer. If None, the factor is evaluated at the attachment point alone.

required
z_value float

The Riebesell z-parameter: the proportional premium increase when the limit doubles.

required
base_limit float

The base limit at which the factor equals 1.0.

required

Returns:

Name Type Description
float

The increased limit factor relative to the base limit.

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
def riebesell_curve(attachment: float, limit: float, z_value: float, base_limit: float) -> float:
    """Calculate the Riebesell increased limit factor for a layer.

    Premium scales by (1 + z_value) each time the limit doubles, giving
    ILF(x) = (x / base_limit) ** log2(1 + z_value).

    Args:
        attachment (float): The attachment point of the layer.
        limit (float): The width of the layer. If None, the factor is evaluated at the attachment point alone.
        z_value (float): The proportional premium increase when the limit doubles.
        base_limit (float): The base limit at which the factor equals 1.0.

    Returns:
        float: The increased limit factor relative to the base limit.
    """
    if limit is None:
        return ((attachment) / base_limit) ** log(1 + z_value, 2)
    else:
        return ((attachment + limit) / base_limit) ** log(1 + z_value, 2)
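
A minimal sketch: with z_value = 0.2 the premium rises 20% per doubling of limit, so the factor at 5m on a 1m base is 5 ** log2(1.2):

from pyre.Models.Exposure.exposure_curve_functions import riebesell_curve

ilf = riebesell_curve(
    attachment=1_000_000,
    limit=4_000_000,      # layer 4m xs 1m, evaluated at its 5m top
    z_value=0.2,
    base_limit=1_000_000,
)
# ilf ~ 1.53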

CredibilityWeight

Source code in src\pyre\Models\AggregateFeatures\selections.py
class CredibilityWeight:
    def __init__(self, experience_data: ExperienceModelData, exposure_data: ExposureModelData) -> None:
        """
        Initialize the CredibilityWeight class with experience and exposure data.

        Args:
            experience_data: Experience model data containing claims and exposures
            exposure_data: Exposure model data containing exposures
        """
        self.experience_data = experience_data
        self.exposure_data = exposure_data

    def calculate_sample_variance(self, data: List[float]) -> float:
        """
        Calculate the sample variance of a list of data points.

        Args:
            data: List of observed values

        Returns:
            Sample variance of the data
        """
        if not data or len(data) < 2:
            return 0.0

        mean = sum(data) / len(data)
        variance = sum((x - mean) ** 2 for x in data) / (len(data) - 1)  # Using n-1 for sample variance

        return variance

    def calculate_data_variance(self, data: List[float], method: str = "sample") -> float:
        """
        Calculate the variance of data using different methods.

        Args:
            data: List of observed values
            method: Method to use for variance calculation:
                   "sample" - standard sample variance
                   "population" - population variance
                   "process" - estimate of process variance for credibility

        Returns:
            Variance of the data based on the specified method
        """
        if not data or len(data) < 2:
            return 0.0

        mean = sum(data) / len(data)

        if method == "sample":
            # Sample variance (unbiased estimator)
            return sum((x - mean) ** 2 for x in data) / (len(data) - 1)
        elif method == "population":
            # Population variance
            return sum((x - mean) ** 2 for x in data) / len(data)
        elif method == "process":
            # Process variance estimate for credibility calculations
            # This is often the within-variance component
            return sum((x - mean) ** 2 for x in data) / len(data)
        else:
            raise ValueError(f"Invalid variance calculation method: {method}")

    def estimate_process_variance(self, data_by_group: Dict[Any, List[float]]) -> float:
        """
        Estimate the process variance (within variance) from grouped data.
        This is useful for Bühlmann and Bühlmann-Straub credibility methods.

        Args:
            data_by_group: Dictionary mapping group identifiers to lists of observed values

        Returns:
            Estimated process variance
        """
        if not data_by_group:
            return 0.0

        total_variance = 0.0
        total_weight = 0.0

        for group, values in data_by_group.items():
            if len(values) < 2:
                continue

            # Calculate within-group variance
            group_mean = sum(values) / len(values)
            group_variance = sum((x - group_mean) ** 2 for x in values) / len(values)

            # Weight by group size
            weight = len(values)
            total_variance += group_variance * weight
            total_weight += weight

        if total_weight <= 0:
            return 0.0

        return total_variance / total_weight

    def estimate_variance_of_hypothetical_means(self, data_by_group: Dict[Any, List[float]]) -> float:
        """
        Estimate the variance of hypothetical means (between variance) from grouped data.
        This is useful for Bühlmann and Bühlmann-Straub credibility methods.

        Args:
            data_by_group: Dictionary mapping group identifiers to lists of observed values

        Returns:
            Estimated variance of hypothetical means
        """
        if not data_by_group:
            return 0.0

        # Calculate overall mean
        all_values = []
        for values in data_by_group.values():
            all_values.extend(values)

        if not all_values:
            return 0.0

        overall_mean = sum(all_values) / len(all_values)

        # Calculate group means
        group_means = {}
        group_sizes = {}

        for group, values in data_by_group.items():
            if not values:
                continue

            group_means[group] = sum(values) / len(values)
            group_sizes[group] = len(values)

        # Calculate variance of group means
        weighted_sum_squared_diff = 0.0
        total_weight = 0.0

        for group, mean in group_means.items():
            weight = group_sizes[group]
            weighted_sum_squared_diff += weight * ((mean - overall_mean) ** 2)
            total_weight += weight

        if total_weight <= 0:
            return 0.0

        # Calculate raw between variance
        raw_between_variance = weighted_sum_squared_diff / total_weight

        # Adjust for within-group variance
        process_variance = self.estimate_process_variance(data_by_group)

        # Calculate average group size
        avg_group_size = sum(group_sizes.values()) / len(group_sizes) if group_sizes else 0

        # Adjust between variance by removing the expected contribution from process variance
        adjusted_between_variance = max(0, raw_between_variance - (process_variance / avg_group_size))

        return adjusted_between_variance

    def limited_fluctuation_credibility(self, claim_count: int, full_credibility_standard: int = 1082) -> float:
        """
        Calculate credibility using the Limited Fluctuation (Classical) Credibility method.
        Based on the formula Z = min(sqrt(n/n_full), 1) where n_full is the full credibility standard.

        The default full_credibility_standard of 1082 corresponds to a 95% confidence level
        with a 5% margin of error assuming a Poisson frequency process.

        Args:
            claim_count: Number of claims
            full_credibility_standard: Number of claims needed for full credibility

        Returns:
            Credibility factor between 0 and 1
        """
        if claim_count <= 0 or full_credibility_standard <= 0:
            return 0.0

        credibility = math.sqrt(claim_count / full_credibility_standard)
        return min(credibility, 1.0)

    def buhlmann_credibility(self, claim_count: int, expected_process_variance: float, 
                             variance_of_hypothetical_means: float) -> float:
        """
        Calculate credibility using the Bühlmann Credibility method.
        Based on the formula Z = n / (n + k) where k = EPV / VHM.

        Args:
            claim_count: Number of claims
            expected_process_variance: Expected value of the process variance (EPV)
            variance_of_hypothetical_means: Variance of the hypothetical means (VHM)

        Returns:
            Credibility factor between 0 and 1
        """
        if claim_count <= 0 or expected_process_variance <= 0 or variance_of_hypothetical_means <= 0:
            return 0.0

        k = expected_process_variance / variance_of_hypothetical_means
        credibility = claim_count / (claim_count + k)

        return min(max(credibility, 0.0), 1.0)

    def buhlmann_straub_credibility(self, exposures: List[float], claim_counts: List[int], 
                                   expected_process_variance: float, 
                                   variance_of_hypothetical_means: float) -> float:
        """
        Calculate credibility using the Bühlmann-Straub Credibility method.
        This extends the Bühlmann method to account for varying exposure sizes.

        Args:
            exposures: List of exposure values
            claim_counts: List of claim counts corresponding to each exposure
            expected_process_variance: Expected value of the process variance (EPV)
            variance_of_hypothetical_means: Variance of the hypothetical means (VHM)

        Returns:
            Credibility factor between 0 and 1
        """
        if not exposures or not claim_counts or len(exposures) != len(claim_counts):
            return 0.0

        if expected_process_variance <= 0 or variance_of_hypothetical_means <= 0:
            return 0.0

        total_exposure = sum(exposures)
        if total_exposure <= 0:
            return 0.0

        k = expected_process_variance / variance_of_hypothetical_means
        credibility = total_exposure / (total_exposure + k)

        return min(max(credibility, 0.0), 1.0)

    def greatest_accuracy_credibility(self, data: List[float], collective_mean: float) -> float:
        """
        Calculate credibility using the Greatest Accuracy Credibility method.
        This method aims to minimize the mean squared error.

        Args:
            data: List of observed values
            collective_mean: The collective mean (a priori estimate)

        Returns:
            Credibility factor between 0 and 1
        """
        if not data or collective_mean <= 0:
            return 0.0

        # Calculate individual mean and variance
        individual_mean = sum(data) / len(data)
        if individual_mean <= 0:
            return 0.0

        # Use the helper function to calculate variance
        individual_variance = self.calculate_data_variance(data, method="population")

        # Calculate between variance (estimate of variance of hypothetical means)
        between_variance = max(0, individual_variance - (collective_mean / len(data)))

        # Calculate credibility
        if between_variance <= 0:
            return 0.0

        credibility = between_variance / (between_variance + (individual_variance / len(data)))

        return min(max(credibility, 0.0), 1.0)

    def bayesian_credibility(self, prior_mean: float, prior_variance: float, 
                            data: List[float], data_variance: float) -> float:
        """
        Calculate credibility using Bayesian Credibility approach.

        Args:
            prior_mean: Mean of the prior distribution
            prior_variance: Variance of the prior distribution
            data: List of observed values
            data_variance: Variance of the data

        Returns:
            Credibility factor between 0 and 1
        """
        if not data or prior_variance <= 0 or data_variance <= 0:
            return 0.0

        n = len(data)
        if n <= 0:
            return 0.0

        # Bayesian credibility formula
        credibility = (n * prior_variance) / (n * prior_variance + data_variance)

        return min(max(credibility, 0.0), 1.0)
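
A minimal sketch, following the document's convention of assuming the data objects already exist:

from pyre.Models.AggregateFeatures.selections import CredibilityWeight

# Assuming we have ExperienceModelData and ExposureModelData instances
cw = CredibilityWeight(experience_data, exposure_data)

# Limited fluctuation: Z = min(sqrt(n / n_full), 1)
z_lf = cw.limited_fluctuation_credibility(claim_count=400)  # sqrt(400/1082) ~ 0.61

# Buhlmann: Z = n / (n + EPV / VHM)
z_b = cw.buhlmann_credibility(
    claim_count=400,
    expected_process_variance=2.0,
    variance_of_hypothetical_means=0.5,
)  # 400 / (400 + 4) ~ 0.99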

__init__(experience_data, exposure_data)

Initialize the CredibilityWeight class with experience and exposure data.

Parameters:

Name Type Description Default
experience_data ExperienceModelData

Experience model data containing claims and exposures

required
exposure_data ExposureModelData

Exposure model data containing exposures

required
Source code in src\pyre\Models\AggregateFeatures\selections.py
def __init__(self, experience_data: ExperienceModelData, exposure_data: ExposureModelData) -> None:
    """
    Initialize the CredibilityWeight class with experience and exposure data.

    Args:
        experience_data: Experience model data containing claims and exposures
        exposure_data: Exposure model data containing exposures
    """
    self.experience_data = experience_data
    self.exposure_data = exposure_data

bayesian_credibility(prior_mean, prior_variance, data, data_variance)

Calculate credibility using Bayesian Credibility approach.

Parameters:

Name Type Description Default
prior_mean float

Mean of the prior distribution

required
prior_variance float

Variance of the prior distribution

required
data List[float]

List of observed values

required
data_variance float

Variance of the data

required

Returns:

Type Description
float

Credibility factor between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def bayesian_credibility(self, prior_mean: float, prior_variance: float, 
                        data: List[float], data_variance: float) -> float:
    """
    Calculate credibility using Bayesian Credibility approach.

    Args:
        prior_mean: Mean of the prior distribution
        prior_variance: Variance of the prior distribution
        data: List of observed values
        data_variance: Variance of the data

    Returns:
        Credibility factor between 0 and 1
    """
    if not data or prior_variance <= 0 or data_variance <= 0:
        return 0.0

    n = len(data)
    if n <= 0:
        return 0.0

    # Bayesian credibility formula
    credibility = (n * prior_variance) / (n * prior_variance + data_variance)

    return min(max(credibility, 0.0), 1.0)

buhlmann_credibility(claim_count, expected_process_variance, variance_of_hypothetical_means)

Calculate credibility using the Bühlmann Credibility method. Based on the formula Z = n / (n + k) where k = EPV / VHM.

Parameters:

Name Type Description Default
claim_count int

Number of claims

required
expected_process_variance float

Expected value of the process variance (EPV)

required
variance_of_hypothetical_means float

Variance of the hypothetical means (VHM)

required

Returns:

Type Description
float

Credibility factor between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def buhlmann_credibility(self, claim_count: int, expected_process_variance: float, 
                         variance_of_hypothetical_means: float) -> float:
    """
    Calculate credibility using the Bühlmann Credibility method.
    Based on the formula Z = n / (n + k) where k = EPV / VHM.

    Args:
        claim_count: Number of claims
        expected_process_variance: Expected value of the process variance (EPV)
        variance_of_hypothetical_means: Variance of the hypothetical means (VHM)

    Returns:
        Credibility factor between 0 and 1
    """
    if claim_count <= 0 or expected_process_variance <= 0 or variance_of_hypothetical_means <= 0:
        return 0.0

    k = expected_process_variance / variance_of_hypothetical_means
    credibility = claim_count / (claim_count + k)

    return min(max(credibility, 0.0), 1.0)

buhlmann_straub_credibility(exposures, claim_counts, expected_process_variance, variance_of_hypothetical_means)

Calculate credibility using the Bühlmann-Straub Credibility method. This extends the Bühlmann method to account for varying exposure sizes.

Parameters:

Name Type Description Default
exposures List[float]

List of exposure values

required
claim_counts List[int]

List of claim counts corresponding to each exposure

required
expected_process_variance float

Expected value of the process variance (EPV)

required
variance_of_hypothetical_means float

Variance of the hypothetical means (VHM)

required

Returns:

Type Description
float

Credibility factor between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def buhlmann_straub_credibility(self, exposures: List[float], claim_counts: List[int], 
                               expected_process_variance: float, 
                               variance_of_hypothetical_means: float) -> float:
    """
    Calculate credibility using the Bühlmann-Straub Credibility method.
    This extends the Bühlmann method to account for varying exposure sizes.

    Args:
        exposures: List of exposure values
        claim_counts: List of claim counts corresponding to each exposure
        expected_process_variance: Expected value of the process variance (EPV)
        variance_of_hypothetical_means: Variance of the hypothetical means (VHM)

    Returns:
        Credibility factor between 0 and 1
    """
    if not exposures or not claim_counts or len(exposures) != len(claim_counts):
        return 0.0

    if expected_process_variance <= 0 or variance_of_hypothetical_means <= 0:
        return 0.0

    total_exposure = sum(exposures)
    if total_exposure <= 0:
        return 0.0

    k = expected_process_variance / variance_of_hypothetical_means
    credibility = total_exposure / (total_exposure + k)

    return min(max(credibility, 0.0), 1.0)
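
A minimal sketch, assuming 'cw' is the CredibilityWeight instance from the earlier example:

z = cw.buhlmann_straub_credibility(
    exposures=[10.0, 25.0, 40.0],
    claim_counts=[3, 8, 12],
    expected_process_variance=2.0,
    variance_of_hypothetical_means=0.5,
)
# k = 2.0 / 0.5 = 4, total exposure = 75, so Z = 75 / 79 ~ 0.95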

calculate_data_variance(data, method='sample')

Calculate the variance of data using different methods.

Parameters:

Name Type Description Default
data List[float]

List of observed values

required
method str

Method to use for variance calculation: "sample" - standard sample variance "population" - population variance "process" - estimate of process variance for credibility

'sample'

Returns:

Type Description
float

Variance of the data based on the specified method

Source code in src\pyre\Models\AggregateFeatures\selections.py
def calculate_data_variance(self, data: List[float], method: str = "sample") -> float:
    """
    Calculate the variance of data using different methods.

    Args:
        data: List of observed values
        method: Method to use for variance calculation:
               "sample" - standard sample variance
               "population" - population variance
               "process" - estimate of process variance for credibility

    Returns:
        Variance of the data based on the specified method
    """
    if not data or len(data) < 2:
        return 0.0

    mean = sum(data) / len(data)

    if method == "sample":
        # Sample variance (unbiased estimator)
        return sum((x - mean) ** 2 for x in data) / (len(data) - 1)
    elif method == "population":
        # Population variance
        return sum((x - mean) ** 2 for x in data) / len(data)
    elif method == "process":
        # Process variance estimate for credibility calculations
        # This is often the within-variance component
        return sum((x - mean) ** 2 for x in data) / len(data)
    else:
        raise ValueError(f"Invalid variance calculation method: {method}")

calculate_sample_variance(data)

Calculate the sample variance of a list of data points.

Parameters:

Name Type Description Default
data List[float]

List of observed values

required

Returns:

Type Description
float

Sample variance of the data

Source code in src\pyre\Models\AggregateFeatures\selections.py
def calculate_sample_variance(self, data: List[float]) -> float:
    """
    Calculate the sample variance of a list of data points.

    Args:
        data: List of observed values

    Returns:
        Sample variance of the data
    """
    if not data or len(data) < 2:
        return 0.0

    mean = sum(data) / len(data)
    variance = sum((x - mean) ** 2 for x in data) / (len(data) - 1)  # Using n-1 for sample variance

    return variance

estimate_process_variance(data_by_group)

Estimate the process variance (within variance) from grouped data. This is useful for Bühlmann and Bühlmann-Straub credibility methods.

Parameters:

Name Type Description Default
data_by_group Dict[Any, List[float]]

Dictionary mapping group identifiers to lists of observed values

required

Returns:

Type Description
float

Estimated process variance

Source code in src\pyre\Models\AggregateFeatures\selections.py
def estimate_process_variance(self, data_by_group: Dict[Any, List[float]]) -> float:
    """
    Estimate the process variance (within variance) from grouped data.
    This is useful for Bühlmann and Bühlmann-Straub credibility methods.

    Args:
        data_by_group: Dictionary mapping group identifiers to lists of observed values

    Returns:
        Estimated process variance
    """
    if not data_by_group:
        return 0.0

    total_variance = 0.0
    total_weight = 0.0

    for group, values in data_by_group.items():
        if len(values) < 2:
            continue

        # Calculate within-group variance
        group_mean = sum(values) / len(values)
        group_variance = sum((x - group_mean) ** 2 for x in values) / len(values)

        # Weight by group size
        weight = len(values)
        total_variance += group_variance * weight
        total_weight += weight

    if total_weight <= 0:
        return 0.0

    return total_variance / total_weight

estimate_variance_of_hypothetical_means(data_by_group)

Estimate the variance of hypothetical means (between variance) from grouped data. This is useful for Bühlmann and Bühlmann-Straub credibility methods.

Parameters:

Name Type Description Default
data_by_group Dict[Any, List[float]]

Dictionary mapping group identifiers to lists of observed values

required

Returns:

Type Description
float

Estimated variance of hypothetical means

Source code in src\pyre\Models\AggregateFeatures\selections.py
def estimate_variance_of_hypothetical_means(self, data_by_group: Dict[Any, List[float]]) -> float:
    """
    Estimate the variance of hypothetical means (between variance) from grouped data.
    This is useful for Bühlmann and Bühlmann-Straub credibility methods.

    Args:
        data_by_group: Dictionary mapping group identifiers to lists of observed values

    Returns:
        Estimated variance of hypothetical means
    """
    if not data_by_group:
        return 0.0

    # Calculate overall mean
    all_values = []
    for values in data_by_group.values():
        all_values.extend(values)

    if not all_values:
        return 0.0

    overall_mean = sum(all_values) / len(all_values)

    # Calculate group means
    group_means = {}
    group_sizes = {}

    for group, values in data_by_group.items():
        if not values:
            continue

        group_means[group] = sum(values) / len(values)
        group_sizes[group] = len(values)

    # Calculate variance of group means
    weighted_sum_squared_diff = 0.0
    total_weight = 0.0

    for group, mean in group_means.items():
        weight = group_sizes[group]
        weighted_sum_squared_diff += weight * ((mean - overall_mean) ** 2)
        total_weight += weight

    if total_weight <= 0:
        return 0.0

    # Calculate raw between variance
    raw_between_variance = weighted_sum_squared_diff / total_weight

    # Adjust for within-group variance
    process_variance = self.estimate_process_variance(data_by_group)

    # Calculate average group size
    avg_group_size = sum(group_sizes.values()) / len(group_sizes) if group_sizes else 0

    # Adjust between variance by removing the expected contribution from process variance
    adjusted_between_variance = max(0, raw_between_variance - (process_variance / avg_group_size))

    return adjusted_between_variance
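
A minimal sketch estimating both variance components from grouped data, assuming 'cw' is the CredibilityWeight instance from the earlier example:

data_by_group = {
    "segment_a": [120.0, 95.0, 130.0],
    "segment_b": [300.0, 280.0, 310.0],
}

epv = cw.estimate_process_variance(data_by_group)                # within-group variance
vhm = cw.estimate_variance_of_hypothetical_means(data_by_group)  # between-group variance
z = cw.buhlmann_credibility(6, epv, vhm)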

greatest_accuracy_credibility(data, collective_mean)

Calculate credibility using the Greatest Accuracy Credibility method. This method aims to minimize the mean squared error.

Parameters:

Name Type Description Default
data List[float]

List of observed values

required
collective_mean float

The collective mean (a priori estimate)

required

Returns:

Type Description
float

Credibility factor between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def greatest_accuracy_credibility(self, data: List[float], collective_mean: float) -> float:
    """
    Calculate credibility using the Greatest Accuracy Credibility method.
    This method aims to minimize the mean squared error.

    Args:
        data: List of observed values
        collective_mean: The collective mean (a priori estimate)

    Returns:
        Credibility factor between 0 and 1
    """
    if not data or collective_mean <= 0:
        return 0.0

    # Calculate individual mean and variance
    individual_mean = sum(data) / len(data)
    if individual_mean <= 0:
        return 0.0

    # Use the helper function to calculate variance
    individual_variance = self.calculate_data_variance(data, method="population")

    # Calculate between variance (estimate of variance of hypothetical means)
    between_variance = max(0, individual_variance - (collective_mean / len(data)))

    # Calculate credibility
    if between_variance <= 0:
        return 0.0

    credibility = between_variance / (between_variance + (individual_variance / len(data)))

    return min(max(credibility, 0.0), 1.0)

limited_fluctuation_credibility(claim_count, full_credibility_standard=1082)

Calculate credibility using the Limited Fluctuation (Classical) Credibility method. Based on the formula Z = min(sqrt(n/n_full), 1) where n_full is the full credibility standard.

The default full_credibility_standard of 1082 corresponds to a 95% confidence level with a 5% margin of error assuming a Poisson frequency process.

Parameters:

Name Type Description Default
claim_count int

Number of claims

required
full_credibility_standard int

Number of claims needed for full credibility

1082

Returns:

Type Description
float

Credibility factor between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def limited_fluctuation_credibility(self, claim_count: int, full_credibility_standard: int = 1082) -> float:
    """
    Calculate credibility using the Limited Fluctuation (Classical) Credibility method.
    Based on the formula Z = min(sqrt(n/n_full), 1) where n_full is the full credibility standard.

    The default full_credibility_standard of 1082 corresponds to a 95% confidence level
    with a 5% margin of error assuming a Poisson frequency process.

    Args:
        claim_count: Number of claims
        full_credibility_standard: Number of claims needed for full credibility

    Returns:
        Credibility factor between 0 and 1
    """
    if claim_count <= 0 or full_credibility_standard <= 0:
        return 0.0

    credibility = math.sqrt(claim_count / full_credibility_standard)
    return min(credibility, 1.0)

Exposures

A container class for managing a collection of Exposure objects.

This class provides list-like behavior for storing and manipulating multiple Exposure instances. It supports indexing, slicing, iteration, and appending new exposures.

Attributes:

Name Type Description
exposures List[Exposure]

The list of Exposure objects managed by this container.

Parameters:

Name Type Description Default
exposures List[Exposure]

A list of Exposure objects to initialize the container.

required

Methods:

Name Description
append

Appends an Exposure object to the collection.

__getitem__

Returns an Exposure or a new Exposures instance for slices.

__iter__

Returns an iterator over the exposures.

__len__

Returns the number of exposures in the collection.

Source code in src\pyre\exposures\exposures.py
class Exposures:
    """A container class for managing a collection of Exposure objects.

    This class provides list-like behavior for storing and manipulating multiple
    Exposure instances. It supports indexing, slicing, iteration, and appending
    new exposures.

    Attributes:
        exposures (List[Exposure]): The list of Exposure objects managed by this container.

    Args:
        exposures (List[Exposure]): A list of Exposure objects to initialize the container.

    Methods:
        append(exposure: Exposure): Appends an Exposure object to the collection.
        __getitem__(key): Returns an Exposure or a new Exposures instance for slices.
        __iter__(): Returns an iterator over the exposures.
        __len__(): Returns the number of exposures in the collection.
    """

    def __init__(self, exposures: List[Exposure])->None:
        self._exposures = exposures

    @property
    def exposures(self) -> List[Exposure]:
        """Returns the list of Exposure objects managed by this container."""
        return self._exposures

    @exposures.setter
    def exposures(self, list_of_exposure_classes: List[Exposure]) -> None:
        """Sets the list of Exposure objects managed by this container."""
        self._exposures = list_of_exposure_classes

    @property
    def modelling_years(self) -> List[int]: 
        """
        Returns a sorted list of unique modelling years for all exposures.

        Returns:
            List[int]: A sorted list of unique modelling years.
        """
        years = {exposure.modelling_year for exposure in self.exposures}
        return sorted(years)

    def append(self, exposure: Exposure) -> None:
        """Append an Exposure object to the collection.

        Args:
            exposure (Exposure): The Exposure object to append.
        """
        self._exposures.append(exposure)

    def __getitem__(self, key):
        """Get an Exposure object by index or a slice of Exposures.

        Args:
            key: An integer index or a slice object.

        Returns:
            Union[Exposure, 'Exposures']: An Exposure object if key is an integer,
                                         or a new Exposures instance if key is a slice.
        """
        if isinstance(key, slice):
            cls = type(self)
            return cls(self._exposures[key])
        index = operator.index(key)
        return self._exposures[index]

    def __iter__(self):
        """Return an iterator over the exposures.

        Returns:
            Iterator[Exposure]: An iterator over the Exposure objects.
        """
        return iter(self._exposures)

    def __len__(self) -> int:
        """Return the number of exposures in the collection.

        Returns:
            int: The number of Exposure objects.
        """
        return len(self._exposures)
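
A minimal container sketch, assuming 'exposure_a', 'exposure_b', and 'exposure_c' are Exposure instances:

from pyre.exposures.exposures import Exposures

container = Exposures([exposure_a, exposure_b])
container.append(exposure_c)

first = container[0]  # indexing returns an Exposure
tail = container[1:]  # slicing returns a new Exposures
print(len(container), container.modelling_years)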

exposures property writable

Returns the list of Exposure objects managed by this container.

modelling_years property

Returns a sorted list of unique modelling years for all exposures.

Returns:

Type Description
List[int]

List[int]: A sorted list of unique modelling years.

__getitem__(key)

Get an Exposure object by index or a slice of Exposures.

Parameters:

Name Type Description Default
key

An integer index or a slice object.

required

Returns:

Type Description

Union[Exposure, 'Exposures']: An Exposure object if key is an integer, or a new Exposures instance if key is a slice.

Source code in src\pyre\exposures\exposures.py
def __getitem__(self, key):
    """Get an Exposure object by index or a slice of Exposures.

    Args:
        key: An integer index or a slice object.

    Returns:
        Union[Exposure, 'Exposures']: An Exposure object if key is an integer,
                                     or a new Exposures instance if key is a slice.
    """
    if isinstance(key, slice):
        cls = type(self)
        return cls(self._exposures[key])
    index = operator.index(key)
    return self._exposures[index]

__iter__()

Return an iterator over the exposures.

Returns:

Type Description

Iterator[Exposure]: An iterator over the Exposure objects.

Source code in src\pyre\exposures\exposures.py
def __iter__(self):
    """Return an iterator over the exposures.

    Returns:
        Iterator[Exposure]: An iterator over the Exposure objects.
    """
    return iter(self._exposures)

__len__()

Return the number of exposures in the collection.

Returns:

Name Type Description
int int

The number of Exposure objects.

Source code in src\pyre\exposures\exposures.py
def __len__(self) -> int:
    """Return the number of exposures in the collection.

    Returns:
        int: The number of Exposure objects.
    """
    return len(self._exposures)

append(exposure)

Append an Exposure object to the collection.

Parameters:

Name Type Description Default
exposure Exposure

The Exposure object to append.

required
Source code in src\pyre\exposures\exposures.py
def append(self, exposure: Exposure) -> None:
    """Append an Exposure object to the collection.

    Args:
        exposure (Exposure): The Exposure object to append.
    """
    self._exposures.append(exposure)

Selections

Source code in src\pyre\Models\AggregateFeatures\selections.py
class Selections:
    def __init__(self, experience_data: ExperienceModelData, exposure_data: ExposureModelData, 
                 credibility_weight: Optional[CredibilityWeight] = None) -> None:
        """
        Initialize the Selections class with experience and exposure data.

        Args:
            experience_data: Experience model data containing claims and exposures
            exposure_data: Exposure model data containing exposures
            credibility_weight: Optional CredibilityWeight object for calculating weights
        """
        self.experience_data = experience_data
        self.exposure_data = exposure_data
        self.credibility_weight = credibility_weight or CredibilityWeight(experience_data, exposure_data)
        self._experience_weight = 0.5  # Default weight
        self._exposure_weight = 0.5    # Default weight

    def calculate_experience_weight(self, method: str = "limited_fluctuation", **kwargs) -> float:
        """
        Calculate the weight to assign to experience rating.

        Args:
            method: Credibility method to use ("limited_fluctuation", "buhlmann", 
                   "buhlmann_straub", "greatest_accuracy", or "bayesian")
            **kwargs: Additional parameters for the credibility method

        Returns:
            Weight for experience rating between 0 and 1
        """
        # Get claim count if not provided
        if 'claim_count' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
            kwargs['claim_count'] = len(self.experience_data.subject_contract_claims())

        # Calculate credibility based on the selected method
        if method == "limited_fluctuation":
            weight = self.credibility_weight.limited_fluctuation_credibility(
                kwargs.get('claim_count', 0), 
                kwargs.get('full_credibility_standard', 1082)
            )
        elif method == "buhlmann":
            # Get claim count if not already provided
            claim_count = kwargs.get('claim_count', 0)

            # Get or calculate process variance and variance of hypothetical means
            if 'data_by_group' in kwargs:
                data_by_group = kwargs.get('data_by_group', {})

                # Calculate variances if not provided
                if 'expected_process_variance' not in kwargs:
                    kwargs['expected_process_variance'] = self.credibility_weight.estimate_process_variance(data_by_group)

                if 'variance_of_hypothetical_means' not in kwargs:
                    kwargs['variance_of_hypothetical_means'] = self.credibility_weight.estimate_variance_of_hypothetical_means(data_by_group)

            weight = self.credibility_weight.buhlmann_credibility(
                claim_count,
                kwargs.get('expected_process_variance', 1.0),
                kwargs.get('variance_of_hypothetical_means', 0.1)
            )
        elif method == "buhlmann_straub":
            # Get exposures and claim counts
            exposures = kwargs.get('exposures', [])
            claim_counts = kwargs.get('claim_counts', [])

            # Get or calculate process variance and variance of hypothetical means
            if 'data_by_group' in kwargs:
                data_by_group = kwargs.get('data_by_group', {})

                # Calculate variances if not provided
                if 'expected_process_variance' not in kwargs:
                    kwargs['expected_process_variance'] = self.credibility_weight.estimate_process_variance(data_by_group)

                if 'variance_of_hypothetical_means' not in kwargs:
                    kwargs['variance_of_hypothetical_means'] = self.credibility_weight.estimate_variance_of_hypothetical_means(data_by_group)

            weight = self.credibility_weight.buhlmann_straub_credibility(
                exposures,
                claim_counts,
                kwargs.get('expected_process_variance', 1.0),
                kwargs.get('variance_of_hypothetical_means', 0.1)
            )
        elif method == "greatest_accuracy":
            # Get loss data if not provided
            if 'data' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
                kwargs['data'] = [claim.amount for claim in self.experience_data.subject_contract_claims()]

            # Get data for easier access
            data = kwargs.get('data', [])

            weight = self.credibility_weight.greatest_accuracy_credibility(
                data,
                kwargs.get('collective_mean', 1.0)
            )
        elif method == "bayesian":
            # Get loss data if not provided
            if 'data' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
                kwargs['data'] = [claim.amount for claim in self.experience_data.subject_contract_claims()]

            # Calculate data variance if not provided
            data = kwargs.get('data', [])
            if 'data_variance' not in kwargs and data:
                kwargs['data_variance'] = self.credibility_weight.calculate_data_variance(data, method="sample")

            weight = self.credibility_weight.bayesian_credibility(
                kwargs.get('prior_mean', 1.0),
                kwargs.get('prior_variance', 0.1),
                data,
                kwargs.get('data_variance', 1.0)
            )
        else:
            raise ValueError(f"Invalid credibility method: {method}")

        self._experience_weight = weight
        self._exposure_weight = 1.0 - weight

        return weight

    def exposure_weight(self) -> float:
        """
        Get the weight to assign to exposure rating.

        Returns:
            Weight for exposure rating between 0 and 1
        """
        return self._exposure_weight

    def unlimited_selection(self, experience_result: float, exposure_result: float) -> float:
        """
        Selects the unlimited option for the subject contract by combining
        experience and exposure results based on their weights.

        Args:
            experience_result: Result from experience rating method
            exposure_result: Result from exposure rating method

        Returns:
            Weighted average of experience and exposure results
        """
        return (experience_result * self._experience_weight + 
                exposure_result * self._exposure_weight)

    def make_selection(self, experience_result: float, exposure_result: float, 
                      method: str = "limited_fluctuation", **kwargs) -> float:
        """
        Make a selection by calculating weights and combining results.

        Args:
            experience_result: Result from experience rating method
            exposure_result: Result from exposure rating method
            method: Credibility method to use
            **kwargs: Additional parameters for the credibility method

        Returns:
            Selected result based on weighted average
        """
        # Calculate weights
        self.calculate_experience_weight(method, **kwargs)

        # Return weighted average
        return self.unlimited_selection(experience_result, exposure_result)
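
A minimal sketch blending the two rating approaches, assuming the data objects already exist:

from pyre.Models.AggregateFeatures.selections import Selections

selections = Selections(experience_data, exposure_data)

# Blend an experience-rated and an exposure-rated loss cost using
# limited fluctuation credibility with an explicit claim count.
selected = selections.make_selection(
    experience_result=1_250_000.0,
    exposure_result=1_400_000.0,
    method="limited_fluctuation",
    claim_count=400,
)
# weight ~ 0.61 on experience, ~ 0.39 on exposure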

__init__(experience_data, exposure_data, credibility_weight=None)

Initialize the Selections class with experience and exposure data.

Parameters:

Name Type Description Default
experience_data ExperienceModelData

Experience model data containing claims and exposures

required
exposure_data ExposureModelData

Exposure model data containing exposures

required
credibility_weight Optional[CredibilityWeight]

Optional CredibilityWeight object for calculating weights

None
Source code in src\pyre\Models\AggregateFeatures\selections.py
def __init__(self, experience_data: ExperienceModelData, exposure_data: ExposureModelData, 
             credibility_weight: Optional[CredibilityWeight] = None) -> None:
    """
    Initialize the Selections class with experience and exposure data.

    Args:
        experience_data: Experience model data containing claims and exposures
        exposure_data: Exposure model data containing exposures
        credibility_weight: Optional CredibilityWeight object for calculating weights
    """
    self.experience_data = experience_data
    self.exposure_data = exposure_data
    self.credibility_weight = credibility_weight or CredibilityWeight(experience_data, exposure_data)
    self._experience_weight = 0.5  # Default weight
    self._exposure_weight = 0.5    # Default weight

calculate_experience_weight(method='limited_fluctuation', **kwargs)

Calculate the weight to assign to experience rating.

Parameters:

Name Type Description Default
method str

Credibility method to use ("limited_fluctuation", "buhlmann", "buhlmann_straub", "greatest_accuracy", or "bayesian")

'limited_fluctuation'
**kwargs

Additional parameters for the credibility method

{}

Returns:

Type Description
float

Weight for experience rating between 0 and 1

Source code in src\pyre\Models\AggregateFeatures\selections.py
def calculate_experience_weight(self, method: str = "limited_fluctuation", **kwargs) -> float:
    """
    Calculate the weight to assign to experience rating.

    Args:
        method: Credibility method to use ("limited_fluctuation", "buhlmann", 
               "buhlmann_straub", "greatest_accuracy", or "bayesian")
        **kwargs: Additional parameters for the credibility method

    Returns:
        Weight for experience rating between 0 and 1
    """
    # Get claim count if not provided
    if 'claim_count' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
        kwargs['claim_count'] = len(self.experience_data.subject_contract_claims())

    # Calculate credibility based on the selected method
    if method == "limited_fluctuation":
        weight = self.credibility_weight.limited_fluctuation_credibility(
            kwargs.get('claim_count', 0), 
            kwargs.get('full_credibility_standard', 1082)
        )
    elif method == "buhlmann":
        # Get claim count if not already provided
        claim_count = kwargs.get('claim_count', 0)

        # Get or calculate process variance and variance of hypothetical means
        if 'data_by_group' in kwargs:
            data_by_group = kwargs.get('data_by_group', {})

            # Calculate variances if not provided
            if 'expected_process_variance' not in kwargs:
                kwargs['expected_process_variance'] = self.credibility_weight.estimate_process_variance(data_by_group)

            if 'variance_of_hypothetical_means' not in kwargs:
                kwargs['variance_of_hypothetical_means'] = self.credibility_weight.estimate_variance_of_hypothetical_means(data_by_group)

        weight = self.credibility_weight.buhlmann_credibility(
            claim_count,
            kwargs.get('expected_process_variance', 1.0),
            kwargs.get('variance_of_hypothetical_means', 0.1)
        )
    elif method == "buhlmann_straub":
        # Get exposures and claim counts
        exposures = kwargs.get('exposures', [])
        claim_counts = kwargs.get('claim_counts', [])

        # Get or calculate process variance and variance of hypothetical means
        if 'data_by_group' in kwargs:
            data_by_group = kwargs.get('data_by_group', {})

            # Calculate variances if not provided
            if 'expected_process_variance' not in kwargs:
                kwargs['expected_process_variance'] = self.credibility_weight.estimate_process_variance(data_by_group)

            if 'variance_of_hypothetical_means' not in kwargs:
                kwargs['variance_of_hypothetical_means'] = self.credibility_weight.estimate_variance_of_hypothetical_means(data_by_group)

        weight = self.credibility_weight.buhlmann_straub_credibility(
            exposures,
            claim_counts,
            kwargs.get('expected_process_variance', 1.0),
            kwargs.get('variance_of_hypothetical_means', 0.1)
        )
    elif method == "greatest_accuracy":
        # Get loss data if not provided
        if 'data' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
            kwargs['data'] = [claim.amount for claim in self.experience_data.subject_contract_claims()]

        # Get data for easier access
        data = kwargs.get('data', [])

        weight = self.credibility_weight.greatest_accuracy_credibility(
            data,
            kwargs.get('collective_mean', 1.0)
        )
    elif method == "bayesian":
        # Get loss data if not provided
        if 'data' not in kwargs and hasattr(self.experience_data, 'subject_contract_claims'):
            kwargs['data'] = [claim.amount for claim in self.experience_data.subject_contract_claims()]

        # Calculate data variance if not provided
        data = kwargs.get('data', [])
        if 'data_variance' not in kwargs and data:
            kwargs['data_variance'] = self.credibility_weight.calculate_data_variance(data, method="sample")

        weight = self.credibility_weight.bayesian_credibility(
            kwargs.get('prior_mean', 1.0),
            kwargs.get('prior_variance', 0.1),
            data,
            kwargs.get('data_variance', 1.0)
        )
    else:
        raise ValueError(f"Invalid credibility method: {method}")

    self._experience_weight = weight
    self._exposure_weight = 1.0 - weight

    return weight
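
The 1082 default above is the classical full-credibility standard for claim counts (a 5% tolerance at 90% confidence), and the limited-fluctuation rule assigns partial credibility via the square root of the claim-count ratio. A minimal standalone sketch of that rule for intuition; the library's limited_fluctuation_credibility may differ in detail:

import math

def limited_fluctuation_weight(claim_count: int, full_credibility_standard: int = 1082) -> float:
    """Square-root rule: Z = min(1, sqrt(n / n_full))."""
    return min(1.0, math.sqrt(claim_count / full_credibility_standard))

print(limited_fluctuation_weight(270))   # ~0.50: partial credibility
print(limited_fluctuation_weight(1500))  # 1.0: capped at full credibility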

exposure_weight()

Get the weight to assign to exposure rating.

Returns:

    float: Weight for exposure rating between 0 and 1.

Source code in src\pyre\Models\AggregateFeatures\selections.py
def exposure_weight(self) -> float:
    """
    Get the weight to assign to exposure rating.

    Returns:
        Weight for exposure rating between 0 and 1
    """
    return self._exposure_weight

make_selection(experience_result, exposure_result, method='limited_fluctuation', **kwargs)

Make a selection by calculating weights and combining results.

Parameters:

    experience_result (float): Result from experience rating method. Required.
    exposure_result (float): Result from exposure rating method. Required.
    method (str): Credibility method to use. Default: 'limited_fluctuation'.
    **kwargs: Additional parameters for the credibility method. Default: {}.

Returns:

    float: Selected result based on weighted average.

Source code in src\pyre\Models\AggregateFeatures\selections.py
def make_selection(self, experience_result: float, exposure_result: float, 
                  method: str = "limited_fluctuation", **kwargs) -> float:
    """
    Make a selection by calculating weights and combining results.

    Args:
        experience_result: Result from experience rating method
        exposure_result: Result from exposure rating method
        method: Credibility method to use
        **kwargs: Additional parameters for the credibility method

    Returns:
        Selected result based on weighted average
    """
    # Calculate weights
    self.calculate_experience_weight(method, **kwargs)

    # Return weighted average
    return self.unlimited_selection(experience_result, exposure_result)
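
To make the blending concrete: if the credibility calculation assigns a weight Z to experience, make_selection returns Z * experience_result + (1 - Z) * exposure_result. A hedged sketch, where 'selector' stands in for an instance of this selections class and the figures are illustrative:

# Assuming 'selector' is an instance of the selections class documented above
selected = selector.make_selection(
    experience_result=1_200_000,  # loss cost from experience rating
    exposure_result=1_500_000,    # loss cost from exposure rating
    method="limited_fluctuation",
    claim_count=270,              # roughly 50% credibility under the 1082 standard
)
# With Z of about 0.5, the selection lands near 0.5 * 1.2m + 0.5 * 1.5m = 1.35m
print(selected)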

unlimited_selection(experience_result, exposure_result)

Selects the unlimited option for the subject contract by combining experience and exposure results based on their weights.

Parameters:

    experience_result (float): Result from experience rating method. Required.
    exposure_result (float): Result from exposure rating method. Required.

Returns:

    float: Weighted average of experience and exposure results.

Source code in src\pyre\Models\AggregateFeatures\selections.py
def unlimited_selection(self, experience_result: float, exposure_result: float) -> float:
    """
    Selects the unlimited option for the subject contract by combining
    experience and exposure results based on their weights.

    Args:
        experience_result: Result from experience rating method
        exposure_result: Result from exposure rating method

    Returns:
        Weighted average of experience and exposure results
    """
    return (experience_result * self._experience_weight + 
            exposure_result * self._exposure_weight)

calculate_curve(curve_type, parameters, position)

Calculate curve value based on curve type and parameters.

Parameters:

    curve_type (ExposureCurveType): Type of curve to use. Required.
    parameters (Dict[str, Any]): Dictionary containing curve-specific parameters. Required.
    position (float): Position on the curve. Required.

Returns:

    float: Calculated curve value.

Source code in src\pyre\Models\Exposure\exposure_curve_functions.py
def calculate_curve(curve_type: ExposureCurveType, parameters: Dict[str, Any], position: float) -> float:
    """Calculate curve value based on curve type and parameters.

    Args:
        curve_type: Type of curve to use
        parameters: Dictionary containing curve-specific parameters
        position: Position on the curve

    Returns:
        float: Calculated curve value
    """
    if curve_type not in exposure_curve_calculation:
        raise ValueError(f"Unsupported curve type: {curve_type}")

    func = exposure_curve_calculation[curve_type]
    return func(**parameters, curve_position=position)
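
calculate_curve is a thin dispatcher: it looks the curve type up in the exposure_curve_calculation registry and forwards the parameter dictionary to the matching function. The same pattern in miniature, with a made-up registry and curve name rather than the library's actual entries:

from typing import Any, Callable, Dict

# Hypothetical registry illustrating the dispatch pattern
curve_registry: Dict[str, Callable[..., float]] = {
    "power": lambda b, curve_position: curve_position ** b,
}

def calc(curve_type: str, parameters: Dict[str, Any], position: float) -> float:
    if curve_type not in curve_registry:
        raise ValueError(f"Unsupported curve type: {curve_type}")
    return curve_registry[curve_type](**parameters, curve_position=position)

print(calc("power", {"b": 0.5}, 0.25))  # 0.25 ** 0.5 = 0.5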

Trending

A class for trending insurance data (claims and exposures) to a common base year.

This class provides methods to apply trend factors to claims and exposures, adjusting their values to account for inflation or other time-based changes.

Attributes:

    exposure_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for exposures (e.g., {2020: 1.02, 2021: 1.03, ...}).
    claim_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for claims (e.g., {2020: 1.02, 2021: 1.03, ...}).
    base_year (int): The year to which all data will be trended.

Source code in src\pyre\Models\trending.py
class Trending:
    """
    A class for trending insurance data (claims and exposures) to a common base year.

    This class provides methods to apply trend factors to claims and exposures,
    adjusting their values to account for inflation or other time-based changes.

    Attributes:
        exposure_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for exposures
            (e.g., {2020: 1.02, 2021: 1.03, ...}).
        claim_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for claims
            (e.g., {2020: 1.02, 2021: 1.03, ...}).
        base_year (int): The year to which all data will be trended.
    """

    def __init__(self, exposure_trend_factors: Dict[int, float], claim_trend_factors: Dict[int, float] = None, trend_factors: Dict[int, float] = None, base_year: int = None):
        """
        Initialize the Trending class with trend factors and a base year.

        Args:
            exposure_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for exposures
                (e.g., {2020: 1.02, 2021: 1.03, ...}).
            claim_trend_factors (Dict[int, float], optional): Mapping of year to annual trend factor for claims.
                If None, exposure_trend_factors will be used for claims as well.
            trend_factors (Dict[int, float], optional): For backward compatibility. If provided, both
                exposure_trend_factors and claim_trend_factors will be set to this value.
            base_year (int): The year to which all data will be trended.
        """
        # Backward compatibility: a single trend_factors dict applies to both
        if trend_factors is not None:
            exposure_trend_factors = claim_trend_factors = trend_factors
        self.exposure_trend_factors = exposure_trend_factors
        # Fall back to the exposure factors when no claim factors are given
        self.claim_trend_factors = claim_trend_factors if claim_trend_factors is not None else exposure_trend_factors
        self.base_year = base_year
        self._validate_inputs()

    def _validate_inputs(self) -> None:
        """
        Validate the trend factors and base year.

        Raises:
            ValueError: If trend factors dictionaries are empty or base_year is not an integer.
        """
        if not self.exposure_trend_factors:
            raise ValueError("Exposure trend factors dictionary cannot be empty")
        if not self.claim_trend_factors:
            raise ValueError("Claim trend factors dictionary cannot be empty")
        if not isinstance(self.base_year, int):
            raise ValueError("Base year must be an integer")

    def calculate_trend_factor(self, origin_year: int, for_claims: bool = False) -> float:
        """
        Calculate the trend factor between the origin year and the base year.

        Args:
            origin_year (int): The year from which to trend.
            for_claims (bool, optional): If True, use claim trend factors. If False, use exposure trend factors.
                Defaults to False.

        Returns:
            float: The calculated trend factor.
        """
        # Select the appropriate trend factors based on the for_claims parameter
        trend_factors = self.claim_trend_factors if for_claims else self.exposure_trend_factors

        if origin_year == self.base_year:
            return 1.0
        elif origin_year < self.base_year:
            factor = 1.0
            for year in range(origin_year, self.base_year):
                factor *= trend_factors.get(year, 1.0)
            return factor
        else:
            factor = 1.0
            for year in range(self.base_year, origin_year):
                factor /= trend_factors.get(year, 1.0)
            return factor

    def trend_exposures(self, exposures: Exposures) -> Exposures:
        """
        Apply trend factors to a collection of exposures.

        Args:
            exposures (Exposures): The original Exposures object.

        Returns:
            Exposures: A new Exposures object with trended values.
        """
        trended_exposures = []

        for exposure in exposures:
            # Get the modelling year and exposure value
            origin_year = exposure.modelling_year()

            # Create a new exposure with trended values
            # Use exposure trend factors (for_claims=False is the default)
            trend_factor = self.calculate_trend_factor(origin_year, for_claims=False)

            # Get the original exposure values
            original_values = exposure.exposure_values()
            trended_value = original_values.exposure_value * trend_factor

            # Create new ExposureValues with the trended value
            new_values = ExposureValues(
                exposure_value=trended_value,
                attachment_point=original_values.attachment_point,
                limit=original_values.limit
            )

            # Create a new Exposure with the same metadata but trended values
            new_exposure = Exposure(
                exposure_meta=exposure.exposure_meta,
                exposure_values=new_values
            )

            trended_exposures.append(new_exposure)

        return Exposures(trended_exposures)

    def get_trend_factors(self) -> Dict[str, Dict[int, float]]:
        """
        Get the trend factors from this Trending instance.

        Returns:
            Dict[str, Dict[int, float]]: A dictionary with keys 'exposure' and 'claim', each mapping to
                their respective trend factors dictionary.
        """
        return {
            'exposure': self.exposure_trend_factors,
            'claim': self.claim_trend_factors
        }

    def trend_claims(self, claims: Claims) -> Claims:
        """
        Apply trend factors to a collection of claims.

        Args:
            claims (Claims): The original Claims object.

        Returns:
            Claims: A new Claims object with trended ClaimDevelopmentHistory for each claim.
        """
        trended_claims = []

        for claim in claims.claims:
            # Get the modelling year for trending
            origin_year = claim.claims_meta_data.modelling_year
            # Use claim trend factors (for_claims=True)
            trend_factor = self.calculate_trend_factor(origin_year, for_claims=True)

            # Get the development history
            dev_hist = claim.uncapped_claim_development_history

            # Trend all paid and incurred values in the development history
            trended_paid = [x * trend_factor for x in dev_hist.cumulative_dev_paid]
            trended_incurred = [x * trend_factor for x in dev_hist.cumulative_dev_incurred]

            # Create a new development history with trended values
            trended_dev_hist = ClaimDevelopmentHistory(
                development_months=dev_hist.development_months,
                cumulative_dev_paid=trended_paid,
                cumulative_dev_incurred=trended_incurred,
            )

            # Create a new Claim with the same metadata and trended development history
            trended_claim = Claim(
                claims_meta_data=claim.claims_meta_data, 
                claims_development_history=trended_dev_hist
            )

            trended_claims.append(trended_claim)

        return Claims(trended_claims)
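
A short usage sketch with illustrative trend factors:

from pyre.Models.trending import Trending

trending = Trending(
    exposure_trend_factors={2021: 1.03, 2022: 1.04},
    claim_trend_factors={2021: 1.05, 2022: 1.06},
    base_year=2023,
)

# 2021 claims compound the 2021 and 2022 claim factors: 1.05 * 1.06 = 1.113
print(trending.calculate_trend_factor(2021, for_claims=True))

# Assuming we have a Claims collection called 'claims_data'
# trended_claims = trending.trend_claims(claims_data)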

__init__(exposure_trend_factors, claim_trend_factors=None, trend_factors=None, base_year=None)

Initialize the Trending class with trend factors and a base year.

Parameters:

    exposure_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for exposures (e.g., {2020: 1.02, 2021: 1.03, ...}). Required.
    claim_trend_factors (Dict[int, float], optional): Mapping of year to annual trend factor for claims. If None, exposure_trend_factors will be used for claims as well. Default: None.
    trend_factors (Dict[int, float], optional): For backward compatibility. If provided, both exposure_trend_factors and claim_trend_factors will be set to this value. Default: None.
    base_year (int): The year to which all data will be trended. Default: None.

Source code in src\pyre\Models\trending.py
def __init__(self, exposure_trend_factors: Dict[int, float], claim_trend_factors: Dict[int, float] = None, trend_factors: Dict[int, float] = None, base_year: int = None):
    """
    Initialize the Trending class with trend factors and a base year.

    Args:
        exposure_trend_factors (Dict[int, float]): Mapping of year to annual trend factor for exposures
            (e.g., {2020: 1.02, 2021: 1.03, ...}).
        claim_trend_factors (Dict[int, float], optional): Mapping of year to annual trend factor for claims.
            If None, exposure_trend_factors will be used for claims as well.
        trend_factors (Dict[int, float], optional): For backward compatibility. If provided, both
            exposure_trend_factors and claim_trend_factors will be set to this value.
        base_year (int): The year to which all data will be trended.
    """
    # Backward compatibility: a single trend_factors dict applies to both
    if trend_factors is not None:
        exposure_trend_factors = claim_trend_factors = trend_factors
    self.exposure_trend_factors = exposure_trend_factors
    # Fall back to the exposure factors when no claim factors are given
    self.claim_trend_factors = claim_trend_factors if claim_trend_factors is not None else exposure_trend_factors
    self.base_year = base_year
    self._validate_inputs()

calculate_trend_factor(origin_year, for_claims=False)

Calculate the trend factor between the origin year and the base year.

Parameters:

    origin_year (int): The year from which to trend. Required.
    for_claims (bool, optional): If True, use claim trend factors; if False, use exposure trend factors. Default: False.

Returns:

    float: The calculated trend factor.

Source code in src\pyre\Models\trending.py
def calculate_trend_factor(self, origin_year: int, for_claims: bool = False) -> float:
    """
    Calculate the trend factor between the origin year and the base year.

    Args:
        origin_year (int): The year from which to trend.
        for_claims (bool, optional): If True, use claim trend factors. If False, use exposure trend factors.
            Defaults to False.

    Returns:
        float: The calculated trend factor.
    """
    # Select the appropriate trend factors based on the for_claims parameter
    trend_factors = self.claim_trend_factors if for_claims else self.exposure_trend_factors

    if origin_year == self.base_year:
        return 1.0
    elif origin_year < self.base_year:
        factor = 1.0
        for year in range(origin_year, self.base_year):
            factor *= trend_factors.get(year, 1.0)
        return factor
    else:
        factor = 1.0
        for year in range(self.base_year, origin_year):
            factor /= trend_factors.get(year, 1.0)
        return factor
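
Note the direction of the adjustment: origin years before the base year compound factors forward (multiplication), while origin years after the base year are discounted back (division). With illustrative factors:

from pyre.Models.trending import Trending

trending = Trending(
    exposure_trend_factors={2022: 1.04, 2023: 1.05},
    claim_trend_factors={2022: 1.04, 2023: 1.05},
    base_year=2023,
)

print(trending.calculate_trend_factor(2022))  # 1.04: trend 2022 forward to 2023
print(trending.calculate_trend_factor(2024))  # 1 / 1.05 ~ 0.952: discount 2024 back to 2023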

get_trend_factors()

Get the trend factors from this Trending instance.

Returns:

    Dict[str, Dict[int, float]]: A dictionary with keys 'exposure' and 'claim', each mapping to the respective trend factors dictionary.

Source code in src\pyre\Models\trending.py
def get_trend_factors(self) -> Dict[str, Dict[int, float]]:
    """
    Get the trend factors from this Trending instance.

    Returns:
        Dict[str, Dict[int, float]]: A dictionary with keys 'exposure' and 'claim', each mapping to
            their respective trend factors dictionary.
    """
    return {
        'exposure': self.exposure_trend_factors,
        'claim': self.claim_trend_factors
    }

trend_claims(claims)

Apply trend factors to a collection of claims.

Parameters:

    claims (Claims): The original Claims object. Required.

Returns:

    Claims: A new Claims object with trended ClaimDevelopmentHistory for each claim.

Source code in src\pyre\Models\trending.py
def trend_claims(self, claims: Claims) -> Claims:
    """
    Apply trend factors to a collection of claims.

    Args:
        claims (Claims): The original Claims object.

    Returns:
        Claims: A new Claims object with trended ClaimDevelopmentHistory for each claim.
    """
    trended_claims = []

    for claim in claims.claims:
        # Get the modelling year for trending
        origin_year = claim.claims_meta_data.modelling_year
        # Use claim trend factors (for_claims=True)
        trend_factor = self.calculate_trend_factor(origin_year, for_claims=True)

        # Get the development history
        dev_hist = claim.uncapped_claim_development_history

        # Trend all paid and incurred values in the development history
        trended_paid = [x * trend_factor for x in dev_hist.cumulative_dev_paid]
        trended_incurred = [x * trend_factor for x in dev_hist.cumulative_dev_incurred]

        # Create a new development history with trended values
        trended_dev_hist = ClaimDevelopmentHistory(
            development_months=dev_hist.development_months,
            cumulative_dev_paid=trended_paid,
            cumulative_dev_incurred=trended_incurred,
        )

        # Create a new Claim with the same metadata and trended development history
        trended_claim = Claim(
            claims_meta_data=claim.claims_meta_data, 
            claims_development_history=trended_dev_hist
        )

        trended_claims.append(trended_claim)

    return Claims(trended_claims)

trend_exposures(exposures)

Apply trend factors to a collection of exposures.

Parameters:

    exposures (Exposures): The original Exposures object. Required.

Returns:

    Exposures: A new Exposures object with trended values.

Source code in src\pyre\Models\trending.py
def trend_exposures(self, exposures: Exposures) -> Exposures:
    """
    Apply trend factors to a collection of exposures.

    Args:
        exposures (Exposures): The original Exposures object.

    Returns:
        Exposures: A new Exposures object with trended values.
    """
    trended_exposures = []

    for exposure in exposures:
        # Get the modelling year and exposure value
        origin_year = exposure.modelling_year()

        # Create a new exposure with trended values
        # Use exposure trend factors (for_claims=False is the default)
        trend_factor = self.calculate_trend_factor(origin_year, for_claims=False)

        # Get the original exposure values
        original_values = exposure.exposure_values()
        trended_value = original_values.exposure_value * trend_factor

        # Create new ExposureValues with the trended value
        new_values = ExposureValues(
            exposure_value=trended_value,
            attachment_point=original_values.attachment_point,
            limit=original_values.limit
        )

        # Create a new Exposure with the same metadata but trended values
        new_exposure = Exposure(
            exposure_meta=exposure.exposure_meta,
            exposure_values=new_values
        )

        trended_exposures.append(new_exposure)

    return Exposures(trended_exposures)

calculate_trend_factor(origin_year, base_year, trend_factors, for_claims=False)

Calculate the trend factor between the origin year and the base year.

Parameters:

    origin_year (int): The year from which to trend. Required.
    base_year (int): The year to which to trend. Required.
    trend_factors (Dict[int, float]): Mapping of year to annual trend factor. Required.
    for_claims (bool, optional): If True, treat the factors as claim trend factors; if False, as exposure trend factors. Default: False.

Returns:

    float: The calculated trend factor.

Source code in src\pyre\Models\trending.py
def calculate_trend_factor(origin_year: int, base_year: int, trend_factors: Dict[int, float], for_claims: bool = False) -> float:
    """
    Calculate the trend factor between the origin year and the base year.

    Args:
        origin_year (int): The year from which to trend.
        base_year (int): The year to which to trend.
        trend_factors (Dict[int, float]): Mapping of year to annual trend factor.
        for_claims (bool, optional): If True, use as claim trend factors. If False, use as exposure trend factors.
            Defaults to False.

    Returns:
        float: The calculated trend factor.
    """
    trending = Trending(exposure_trend_factors=trend_factors, claim_trend_factors=trend_factors, base_year=base_year)
    return trending.calculate_trend_factor(origin_year, for_claims=for_claims)
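
The wrapper gives the same result as the Trending method without constructing an instance by hand:

from pyre.Models.trending import calculate_trend_factor

factors = {2021: 1.03, 2022: 1.04}
print(calculate_trend_factor(2021, base_year=2023, trend_factors=factors))  # 1.03 * 1.04 = 1.0712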

get_trend_factors(trending_instance)

Get the trend factors from a Trending instance.

Parameters:

    trending_instance (Trending): The Trending instance to get trend factors from. Required.

Returns:

    Dict[str, Dict[int, float]]: A dictionary with keys 'exposure' and 'claim', each mapping to the respective trend factors dictionary.

Source code in src\pyre\Models\trending.py
def get_trend_factors(trending_instance: Trending) -> Dict[str, Dict[int, float]]:
    """
    Get the trend factors from a Trending instance.

    Args:
        trending_instance (Trending): The Trending instance to get trend factors from.

    Returns:
        Dict[str, Dict[int, float]]: A dictionary with keys 'exposure' and 'claim', each mapping to
            their respective trend factors dictionary.
    """
    return {
        'exposure': trending_instance.exposure_trend_factors,
        'claim': trending_instance.claim_trend_factors
    }

trend_claims(claims, trend_factors, base_year)

Apply trend factors to a collection of claims.

Parameters:

    claims (Claims): The original Claims object. Required.
    trend_factors (Dict[int, float]): Mapping of year to annual trend factor. Required.
    base_year (int): The year to which all claims will be trended. Required.

Returns:

    Claims: A new Claims object with trended ClaimDevelopmentHistory for each claim.

Source code in src\pyre\Models\trending.py
def trend_claims(claims: Claims, trend_factors: Dict[int, float], base_year: int) -> Claims:
    """
    Apply trend factors to a collection of claims.

    Args:
        claims (Claims): The original Claims object.
        trend_factors (Dict[int, float]): Mapping of year to annual trend factor.
        base_year (int): The year to which all claims will be trended.

    Returns:
        Claims: A new Claims object with trended ClaimDevelopmentHistory for each claim.
    """
    trending = Trending(exposure_trend_factors=trend_factors, claim_trend_factors=trend_factors, base_year=base_year)
    return trending.trend_claims(claims)
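
Usage mirrors Trending.trend_claims; following the convention of the earlier examples, claims_data is assumed to be an existing Claims collection:

from pyre.Models.trending import trend_claims

# Assuming we have a Claims collection called 'claims_data'
trended = trend_claims(claims_data, trend_factors={2021: 1.05, 2022: 1.06}, base_year=2023)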

trend_exposures(exposures, trend_factors, base_year)

Apply trend factors to a collection of exposures.

Parameters:

    exposures (Exposures): The original Exposures object. Required.
    trend_factors (Dict[int, float]): Mapping of year to annual trend factor. Required.
    base_year (int): The year to which all exposures will be trended. Required.

Returns:

    Exposures: A new Exposures object with trended values.

Source code in src\pyre\Models\trending.py
def trend_exposures(exposures: Exposures, trend_factors: Dict[int, float], base_year: int) -> Exposures:
    """
    Apply trend factors to a collection of exposures.

    Args:
        exposures (Exposures): The original Exposures object.
        trend_factors (Dict[int, float]): Mapping of year to annual trend factor.
        base_year (int): The year to which all exposures will be trended.

    Returns:
        Exposures: A new Exposures object with trended values.
    """
    trending = Trending(exposure_trend_factors=trend_factors, claim_trend_factors=trend_factors, base_year=base_year)
    return trending.trend_exposures(exposures)
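
And likewise for exposures, assuming exposures_data is an existing Exposures collection:

from pyre.Models.trending import trend_exposures

# Assuming we have an Exposures collection called 'exposures_data'
trended = trend_exposures(exposures_data, trend_factors={2021: 1.03, 2022: 1.04}, base_year=2023)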