diff --git a/aztec_gddt/experiment.py b/aztec_gddt/experiment.py index 45d51e5..94bdf30 100644 --- a/aztec_gddt/experiment.py +++ b/aztec_gddt/experiment.py @@ -5,7 +5,12 @@ from cadCAD.tools.preparation import sweep_cartesian_product # type: ignore from aztec_gddt.params import INITIAL_STATE from aztec_gddt.psuu.tensor_transform import timestep_tensor_to_trajectory_tensor -from aztec_gddt.params import SINGLE_RUN_PARAMS, TIMESTEPS, BASE_AGENTS_DICT +from aztec_gddt.params import ( + SINGLE_RUN_PARAMS, + TIMESTEPS, + BASE_AGENTS_DICT, + zero_timeseries, +) from aztec_gddt.params import * from aztec_gddt.structure import AZTEC_MODEL_BLOCKS from aztec_gddt.types import AztecModelParams, AztecModelState, Agent @@ -23,7 +28,7 @@ import os from glob import glob -CLOUD_BUCKET_NAME = 'aztec-gddt' +CLOUD_BUCKET_NAME = "aztec-gddt" logger = logging.getLogger(DEFAULT_LOGGER) @@ -43,29 +48,27 @@ def standard_run(N_timesteps=TIMESTEPS) -> DataFrame: sweep_params = {k: [v] for k, v in SINGLE_RUN_PARAMS.items()} # Load simulation arguments - sim_args = (INITIAL_STATE, - sweep_params, - AZTEC_MODEL_BLOCKS, - N_timesteps, - N_samples) + sim_args = (INITIAL_STATE, sweep_params, AZTEC_MODEL_BLOCKS, N_timesteps, N_samples) # Run simulation sim_df = sim_run(*sim_args) return sim_df -def custom_run(initial_state: Optional[AztecModelState] = None, - default_params: Optional[AztecModelParams] = None, - params_to_modify: Optional[Dict[str, List]] = None, - model_blocks: Optional[list[dict]] = None, - N_timesteps: int = TIMESTEPS, - N_samples: int = 1) -> DataFrame: +def custom_run( + initial_state: Optional[AztecModelState] = None, + default_params: Optional[AztecModelParams] = None, + params_to_modify: Optional[Dict[str, List]] = None, + model_blocks: Optional[list[dict]] = None, + N_timesteps: int = TIMESTEPS, + N_samples: int = 1, +) -> DataFrame: """ Function to run a custom cadCAD simulation Args: initial_state (AztecModelState): The initial state for the simulation - default_params (AztecModelParams): The default parameters to use. + default_params (AztecModelParams): The default parameters to use. params_to_sweep (Dict[str, List]): The parameters to sweep during the simulation model_blocks (list[dict]): The model blocks for the simulation N_timesteps (int): Number of timesteps to run the simulation @@ -83,7 +86,7 @@ def custom_run(initial_state: Optional[AztecModelState] = None, if model_blocks is None: model_blocks = AZTEC_MODEL_BLOCKS - # Begin by copying the indicated default settings. + # Begin by copying the indicated default settings. 
sweep_params = {k: [v] for k, v in default_params.items()} if params_to_modify is not None: @@ -98,29 +101,27 @@ def custom_run(initial_state: Optional[AztecModelState] = None, else: pass - sim_args = (initial_state, - sweep_params, - model_blocks, - N_timesteps, - N_samples) + sim_args = (initial_state, sweep_params, model_blocks, N_timesteps, N_samples) # Run simulation sim_df = sim_run(*sim_args) return sim_df -def psuu_exploratory_run(N_sweep_samples=-1, - N_samples=3, - N_timesteps=500, - N_jobs=-1, - parallelize_jobs=True, - supress_cadCAD_print=False, - output_path='', - timestep_tensor_prefix='', - N_sequencer=10, - N_prover=10, - base_folder="", - cloud_stream=True) -> Optional[DataFrame]: +def psuu_exploratory_run( + N_sweep_samples=-1, + N_samples=3, + N_timesteps=500, + N_jobs=-1, + parallelize_jobs=True, + supress_cadCAD_print=False, + output_path="", + timestep_tensor_prefix="", + N_sequencer=10, + N_prover=10, + base_folder="", + cloud_stream=True, +) -> Optional[DataFrame]: """Function which runs the cadCAD simulations Returns: @@ -131,42 +132,87 @@ def psuu_exploratory_run(N_sweep_samples=-1, # Relay Agent Sqn3Prv3_agents = [] - assign_params = {'stake_activation_period', 'phase_duration_commit_bond_min_blocks', 'gas_threshold_for_tx', 'proving_marketplace_usage_probability', 'gas_fee_l1_time_series', 'phase_duration_reveal_min_blocks', 'gwei_to_tokens', 'slash_params', 'gas_fee_blob_time_series', 'phase_duration_proposal_max_blocks', 'rewards_to_relay', 'phase_duration_rollup_max_blocks', 'phase_duration_rollup_min_blocks', - 'phase_duration_reveal_max_blocks', 'fee_subsidy_fraction', 'phase_duration_race_min_blocks', 'timestep_in_blocks', 'rewards_to_provers', 'label', 'daily_block_reward', 'blob_gas_threshold_for_tx', 'phase_duration_race_max_blocks', 'unstake_cooldown_period', 'phase_duration_commit_bond_max_blocks', 'commit_bond_amount', 'uncle_count', 'phase_duration_proposal_min_blocks', 'final_probability', 'op_cost_sequencer', 'op_cost_prover'} + assign_params = { + "stake_activation_period", + "phase_duration_commit_bond_min_blocks", + "gas_threshold_for_tx", + "proving_marketplace_usage_probability", + "gas_fee_l1_time_series", + "phase_duration_reveal_min_blocks", + "gwei_to_tokens", + "slash_params", + "gas_fee_blob_time_series", + "phase_duration_proposal_max_blocks", + "rewards_to_relay", + "phase_duration_rollup_max_blocks", + "phase_duration_rollup_min_blocks", + "phase_duration_reveal_max_blocks", + "fee_subsidy_fraction", + "phase_duration_race_min_blocks", + "timestep_in_blocks", + "rewards_to_provers", + "label", + "daily_block_reward", + "blob_gas_threshold_for_tx", + "phase_duration_race_max_blocks", + "unstake_cooldown_period", + "phase_duration_commit_bond_max_blocks", + "commit_bond_amount", + "uncle_count", + "phase_duration_proposal_min_blocks", + "final_probability", + "op_cost_sequencer", + "op_cost_prover", + } for _ in range(N_sequencer): - a = Agent(uuid=uuid4(), - balance=100_000, - is_sequencer=True, - is_prover=False, - is_relay=False, - staked_amount=32 * 100) + a = Agent( + uuid=uuid4(), + balance=100_000, + is_sequencer=True, + is_prover=False, + is_relay=False, + staked_amount=32 * 100, + ) Sqn3Prv3_agents.append(a) for _ in range(N_prover): - a = Agent(uuid=uuid4(), - balance=100_000, - is_sequencer=False, - is_prover=True, - is_relay=False, - staked_amount=0.0) + a = Agent( + uuid=uuid4(), + balance=100_000, + is_sequencer=False, + is_prover=True, + is_relay=False, + staked_amount=0.0, + ) Sqn3Prv3_agents.append(a) Sqn3Prv3_dict = 
{a.uuid: a for a in Sqn3Prv3_agents} Sqn3Prv3 = {**BASE_AGENTS_DICT, **Sqn3Prv3_dict} initial_state = INITIAL_STATE.copy() - initial_state['agents'] = Sqn3Prv3 - initial_state['token_supply'] = TokenSupply.from_state(initial_state) + initial_state["agents"] = Sqn3Prv3 + initial_state["token_supply"] = TokenSupply.from_state(initial_state) sweep_params = {k: [v] for k, v in SINGLE_RUN_PARAMS.items()} N_SAMPLES_CENSORSHIP_TS = 100 - CENSORING_BUILDERS = ['beaverbuild.org', 'rsync-builder.xyz', 'Flashbots', - 'BuildAI (https://buildai.net)', 'Gambit Labs', 'boba-builder.com', - 'Builder + www.btcs.com', 'builder0x69', '0x83bee517', - 'BloXroute', 'I can haz block', 'EigenPhi', - 'Edennetwork', 'blockbeelder'] + CENSORING_BUILDERS = [ + "beaverbuild.org", + "rsync-builder.xyz", + "Flashbots", + "BuildAI (https://buildai.net)", + "Gambit Labs", + "boba-builder.com", + "Builder + www.btcs.com", + "builder0x69", + "0x83bee517", + "BloXroute", + "I can haz block", + "EigenPhi", + "Edennetwork", + "blockbeelder", + ] CHERRY_PICKED_BLOCK_NUMBERS = [ 19427023, @@ -180,67 +226,84 @@ def psuu_exploratory_run(N_sweep_samples=-1, 19602101, 19475312, 19543729, - 19640128 + 19640128, ] N_RANDOM_SAMPLES_CENSORSHIP_TS = max( - N_SAMPLES_CENSORSHIP_TS - len(CHERRY_PICKED_BLOCK_NUMBERS), 0) + N_SAMPLES_CENSORSHIP_TS - len(CHERRY_PICKED_BLOCK_NUMBERS), 0 + ) # XXX: only take into consideration points after DENCUN - + DENCUN_BLOCK_NUMBER = 19426589 - local_path = 'data/auxiliary/eth_builder_validator_data_cleaned.parquet.gz' + local_path = "data/auxiliary/eth_builder_validator_data_cleaned.parquet.gz" if os.path.isfile(local_path): print("Reading locally.") - censorship_data = pd.read_parquet( - local_path).query(f"block_number >{DENCUN_BLOCK_NUMBER}") + censorship_data = pd.read_parquet(local_path).query( + f"block_number >{DENCUN_BLOCK_NUMBER}" + ) else: print("Reading from S3 bucket.") censorship_data = pd.read_parquet( - 's3://aztec-gddt/aux-data/eth_builder_validator_data_cleaned.parquet.gz').query(f"block_number > {DENCUN_BLOCK_NUMBER}") + "s3://aztec-gddt/aux-data/eth_builder_validator_data_cleaned.parquet.gz" + ).query(f"block_number > {DENCUN_BLOCK_NUMBER}") # Check that data has no unexpected issues - - assert censorship_data.isna().sum().sum() == 0, "The data should have no missing values." + + assert ( + censorship_data.isna().sum().sum() == 0 + ), "The data should have no missing values." num_repeats = censorship_data.duplicated().sum() assert num_repeats == 0, f"There are {num_repeats} duplicated values." - - assert censorship_data['block_number'].duplicated().sum() == 0, "There are unexpected duplicate block number entries in the data." - - assert len(censorship_data) == censorship_data['slot'].nunique(), "Number of slots should be the same as number of entries in data." - num_slots = censorship_data['slot'].nunique() - num_blocks = censorship_data['block_number'].nunique() - assert num_slots == num_blocks , f"There are {num_slots} slots, but {num_blocks} blocks." + + assert ( + censorship_data["block_number"].duplicated().sum() == 0 + ), "There are unexpected duplicate block number entries in the data." + + assert ( + len(censorship_data) == censorship_data["slot"].nunique() + ), "Number of slots should be the same as number of entries in data." + num_slots = censorship_data["slot"].nunique() + num_blocks = censorship_data["block_number"].nunique() + assert ( + num_slots == num_blocks + ), f"There are {num_slots} slots, but {num_blocks} blocks." 
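+
+    # Taken together, these checks guarantee a one-to-one mapping between
+    # slots and block numbers, so the censorship series sampled below can be
+    # keyed by block number without ambiguity.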
# Begin logic for processing data into time series for sweep # XXX: check to see if data has missing or duplicated values - assert censorship_data.duplicated().sum() == 0, "Data contains duplicates. It should not." - assert censorship_data.isna().sum().sum() == 0, "Data has missing values. It should not. " + assert ( + censorship_data.duplicated().sum() == 0 + ), "Data contains duplicates. It should not." + assert ( + censorship_data.isna().sum().sum() == 0 + ), "Data has missing values. It should not. " # Begin logic for sampling time series SAFETY_MARGIN = 7 - SAMPLED_BLOCK_NUMBERS = (censorship_data - .block_number - .iloc[:-(N_timesteps * SAFETY_MARGIN)] - .sample(N_RANDOM_SAMPLES_CENSORSHIP_TS) - .astype(int) - .tolist()) + SAMPLED_BLOCK_NUMBERS = ( + censorship_data.block_number.iloc[: -(N_timesteps * SAFETY_MARGIN)] + .sample(N_RANDOM_SAMPLES_CENSORSHIP_TS) + .astype(int) + .tolist() + ) ALL_BLOCK_NUMBERS = CHERRY_PICKED_BLOCK_NUMBERS + SAMPLED_BLOCK_NUMBERS CENSORSHIP_SERIES_LIST = [] for block_no in ALL_BLOCK_NUMBERS: - ts = build_censor_series_from_role(data=censorship_data, - censor_list=CENSORING_BUILDERS, - start_time=block_no, - num_timesteps=N_timesteps * SAFETY_MARGIN, - role='builder', - start_time_is_block_no=True) + ts = build_censor_series_from_role( + data=censorship_data, + censor_list=CENSORING_BUILDERS, + start_time=block_no, + num_timesteps=N_timesteps * SAFETY_MARGIN, + role="builder", + start_time_is_block_no=True, + ) CENSORSHIP_SERIES_LIST.append(ts) # HACK: if min duration is `inf`, it will be dynamically set to the max duration @@ -249,26 +312,21 @@ def psuu_exploratory_run(N_sweep_samples=-1, # Phase Durations phase_duration_proposal_min_blocks=[0, 3], phase_duration_proposal_max_blocks=[3, 12], - - phase_duration_reveal_min_blocks=[0, float('inf')], + phase_duration_reveal_min_blocks=[0, float("inf")], phase_duration_reveal_max_blocks=[3, 24], - - phase_duration_commit_bond_min_blocks=[0, float('inf')], + phase_duration_commit_bond_min_blocks=[0, float("inf")], phase_duration_commit_bond_max_blocks=[3, 12], - - phase_duration_rollup_min_blocks=[0, float('inf')], + phase_duration_rollup_min_blocks=[0, float("inf")], phase_duration_rollup_max_blocks=[15, 80], - phase_duration_race_min_blocks=[0], phase_duration_race_max_blocks=[6], - gas_estimators=[DEFAULT_DETERMINISTIC_GAS_ESTIMATOR], tx_estimators=[DEFAULT_DETERMINISTIC_TX_ESTIMATOR], slash_params=[SLASH_PARAMS], gas_fee_l1_time_series=[zero_timeseries], gas_fee_blob_time_series=[zero_timeseries], censorship_series_builder=CENSORSHIP_SERIES_LIST, - censorship_series_validator=[ALWAYS_FALSE_SERIES] + censorship_series_validator=[ALWAYS_FALSE_SERIES], ) sweep_params = {**sweep_params, **sweep_params_upd} # type: ignore @@ -285,120 +343,174 @@ def psuu_exploratory_run(N_sweep_samples=-1, N_measurements = n_sweeps * N_timesteps * N_samples logger.info( - f'PSuU Exploratory Run Dimensions: {N_jobs=:,}, {N_timesteps=:,}, N_sweeps={n_sweeps:,}, {N_samples=:,}, N_trajectories={traj_combinations:,}, N_measurements={N_measurements:,}') + f"PSuU Exploratory Run Dimensions: {N_jobs=:,}, {N_timesteps=:,}, N_sweeps={n_sweeps:,}, {N_samples=:,}, N_trajectories={traj_combinations:,}, N_measurements={N_measurements:,}" + ) sweep_params_cartesian_product = { - k: list(v) for k, v in sweep_params_cartesian_product.items()} + k: list(v) for k, v in sweep_params_cartesian_product.items() + } - sweep_params_cartesian_product = {k: sample(v, N_sweep_samples) if N_sweep_samples > 0 else v - for k, v in 
sweep_params_cartesian_product.items()} + sweep_params_cartesian_product = { + k: sample(v, N_sweep_samples) if N_sweep_samples > 0 else v + for k, v in sweep_params_cartesian_product.items() + } def inf_to_max_duration(row: pd.Series, min_col: str, max_col: str) -> float: - if row[min_col] == float('inf'): + if row[min_col] == float("inf"): return row[max_col] else: return row[min_col] inf_to_max_duration_cols: list[dict[str, str]] = [ - dict(min_col='phase_duration_proposal_min_blocks', - max_col='phase_duration_proposal_max_blocks'), - dict(min_col='phase_duration_reveal_min_blocks', - max_col='phase_duration_reveal_max_blocks'), - dict(min_col='phase_duration_commit_bond_min_blocks', - max_col='phase_duration_commit_bond_max_blocks'), - dict(min_col='phase_duration_rollup_min_blocks', - max_col='phase_duration_rollup_max_blocks'), - dict(min_col='phase_duration_race_min_blocks', - max_col='phase_duration_race_max_blocks'), + dict( + min_col="phase_duration_proposal_min_blocks", + max_col="phase_duration_proposal_max_blocks", + ), + dict( + min_col="phase_duration_reveal_min_blocks", + max_col="phase_duration_reveal_max_blocks", + ), + dict( + min_col="phase_duration_commit_bond_min_blocks", + max_col="phase_duration_commit_bond_max_blocks", + ), + dict( + min_col="phase_duration_rollup_min_blocks", + max_col="phase_duration_rollup_max_blocks", + ), + dict( + min_col="phase_duration_race_min_blocks", + max_col="phase_duration_race_max_blocks", + ), ] param_df = pd.DataFrame(sweep_params_cartesian_product) for kwargs in inf_to_max_duration_cols: - param_df.loc[:, kwargs['min_col']] = param_df.apply( # type: ignore - inf_to_max_duration, axis='columns', **kwargs).astype(int) # type: ignore + param_df.loc[:, kwargs["min_col"]] = param_df.apply( # type: ignore + inf_to_max_duration, axis="columns", **kwargs + ).astype( + int + ) # type: ignore - sweep_params_cartesian_product = param_df.to_dict(orient='list') + sweep_params_cartesian_product = param_df.to_dict(orient="list") sim_start_time = datetime.now() logger.info( - f"PSuU Exploratory Run starting at {sim_start_time}, ({sim_start_time - invoke_time} since invoke)") + f"PSuU Exploratory Run starting at {sim_start_time}, ({sim_start_time - invoke_time} since invoke)" + ) if N_jobs <= 1: # Load simulation arguments - sim_args = (initial_state, - sweep_params_cartesian_product, - AZTEC_MODEL_BLOCKS, - N_timesteps, - N_samples) + sim_args = ( + initial_state, + sweep_params_cartesian_product, + AZTEC_MODEL_BLOCKS, + N_timesteps, + N_samples, + ) # Run simulation - sim_df = sim_run(*sim_args, exec_mode='single', assign_params=assign_params, - supress_cadCAD_print=supress_cadCAD_print) + sim_df = sim_run( + *sim_args, + exec_mode="single", + assign_params=assign_params, + supress_cadCAD_print=supress_cadCAD_print, + ) else: sweeps_per_process = 25 processes = N_jobs chunk_size = sweeps_per_process split_dicts = [ - {k: v[i:i + chunk_size] - for k, v in sweep_params_cartesian_product.items()} - for i in range(0, len(list(sweep_params_cartesian_product.values())[0]), chunk_size) + { + k: v[i : i + chunk_size] + for k, v in sweep_params_cartesian_product.items() + } + for i in range( + 0, len(list(sweep_params_cartesian_product.values())[0]), chunk_size + ) ] def run_chunk(i_chunk, sweep_params): logger.debug(f"{i_chunk}, {datetime.now()}") - sim_args = (initial_state, - sweep_params, - AZTEC_MODEL_BLOCKS, - N_timesteps, - N_samples) + sim_args = ( + initial_state, + sweep_params, + AZTEC_MODEL_BLOCKS, + N_timesteps, + N_samples, + ) # Run 
simulation
-            sim_df = sim_run(*sim_args, exec_mode='single', assign_params=assign_params,
-                             supress_cadCAD_print=supress_cadCAD_print)
-            output_filename = Path(output_path) / \
-                f'{timestep_tensor_prefix}-{i_chunk}.pkl.zip'
-            sim_df['simulation'] = i_chunk
+            sim_df = sim_run(
+                *sim_args,
+                exec_mode="single",
+                assign_params=assign_params,
+                supress_cadCAD_print=supress_cadCAD_print,
+            )
+            output_filename = (
+                Path(output_path) / f"{timestep_tensor_prefix}-{i_chunk}.pkl.zip"
+            )
+            sim_df["simulation"] = i_chunk
             logger.debug(
-                f"n_groups: {sim_df.groupby(['simulation', 'run', 'subset']).ngroups}")
+                f"n_groups: {sim_df.groupby(['simulation', 'run', 'subset']).ngroups}"
+            )
             sim_df.to_pickle(output_filename)
             agg_df = timestep_tensor_to_trajectory_tensor(sim_df)
-            agg_output_filename = Path(output_path) / \
-                f"trajectory_tensor-{i_chunk}.csv.zip"
+            agg_output_filename = (
+                Path(output_path) / f"trajectory_tensor-{i_chunk}.csv.zip"
+            )
             agg_df.to_csv(agg_output_filename)
             if cloud_stream:
                 session = boto3.Session()
                 s3 = session.client("s3")
-                s3.upload_file(str(agg_output_filename),
-                               CLOUD_BUCKET_NAME,
-                               str(Path(base_folder) / f"trajectory_tensor-{i_chunk}.pkl.zip"))
-                s3.upload_file(output_filename,
-                               CLOUD_BUCKET_NAME,
-                               str(Path(base_folder) / f'{timestep_tensor_prefix}-{i_chunk}.pkl.zip'))
+                s3.upload_file(
+                    str(agg_output_filename),
+                    CLOUD_BUCKET_NAME,
+                    str(Path(base_folder) / f"trajectory_tensor-{i_chunk}.csv.zip"),
+                )
+                s3.upload_file(
+                    output_filename,
+                    CLOUD_BUCKET_NAME,
+                    str(
+                        Path(base_folder)
+                        / f"{timestep_tensor_prefix}-{i_chunk}.pkl.zip"
+                    ),
+                )
                 os.remove(str(output_filename))

         args = enumerate(split_dicts)
         if parallelize_jobs:
             Parallel(n_jobs=processes)(
                 delayed(run_chunk)(i_chunk, sweep_params)
-                for (i_chunk, sweep_params) in tqdm(args, desc='Simulation Chunks', total=len(split_dicts))
+                for (i_chunk, sweep_params) in tqdm(
+                    args, desc="Simulation Chunks", total=len(split_dicts)
+                )
             )
         else:
-            for (i_chunk, sweep_params) in tqdm(args):
-                sim_args = (initial_state,
-                            sweep_params,
-                            AZTEC_MODEL_BLOCKS,
-                            N_timesteps,
-                            N_samples)
+            for i_chunk, sweep_params in tqdm(args):
+                sim_args = (
+                    initial_state,
+                    sweep_params,
+                    AZTEC_MODEL_BLOCKS,
+                    N_timesteps,
+                    N_samples,
+                )

                 # Run simulation
-                sim_df = sim_run(*sim_args, exec_mode='single', assign_params=assign_params,
-                                 supress_cadCAD_print=supress_cadCAD_print)
-                output_filename = output_path + f'-{i_chunk}.pkl.zip'
+                sim_df = sim_run(
+                    *sim_args,
+                    exec_mode="single",
+                    assign_params=assign_params,
+                    supress_cadCAD_print=supress_cadCAD_print,
+                )
+                output_filename = output_path + f"-{i_chunk}.pkl.zip"
                 sim_df.to_pickle(output_filename)

     end_start_time = datetime.now()
     duration: float = (end_start_time - sim_start_time).total_seconds()
     logger.info(
-        f"PSuU Exploratory Run finished at {end_start_time}, ({end_start_time - sim_start_time} since sim start)")
+        f"PSuU Exploratory Run finished at {end_start_time}, ({end_start_time - sim_start_time} since sim start)"
+    )
     logger.info(
-        f"PSuU Exploratory Run Performance Numbers; Duration (s): {duration:,.2f}, Measurements Per Second: {N_measurements/duration:,.2f} M/s, Measurements per Job * Second: {N_measurements/(duration * N_jobs):,.2f} M/(J*s)")
+        f"PSuU Exploratory Run Performance Numbers; Duration (s): {duration:,.2f}, Measurements Per Second: {N_measurements/duration:,.2f} M/s, Measurements per Job * Second: {N_measurements/(duration * N_jobs):,.2f} M/(J*s)"
+    )

     if cloud_stream:
         files = glob(str(Path(output_path) / f"trajectory_tensor-*.csv.zip"))
@@ -409,12 
+521,16 @@ def run_chunk(i_chunk, sweep_params):
         agg_df.to_csv(str(Path(output_path) / f"trajectory_tensor.csv.zip"))
         session = boto3.Session()
         s3 = session.client("s3")
-        logger.info(f"Trajector Tensor saved to {str(Path(base_folder) / f'trajectory_tensor.csv.zip')}")
-        s3.upload_file(str(Path(output_path) / f"trajectory_tensor.csv.zip"),
-                       CLOUD_BUCKET_NAME,
-                       str(Path(base_folder) / f"trajectory_tensor.csv.zip"))
-
-        if 'sim_df' in locals():
+        logger.info(
+            f"Trajectory Tensor saved to {str(Path(base_folder) / f'trajectory_tensor.csv.zip')}"
+        )
+        s3.upload_file(
+            str(Path(output_path) / f"trajectory_tensor.csv.zip"),
+            CLOUD_BUCKET_NAME,
+            str(Path(base_folder) / f"trajectory_tensor.csv.zip"),
+        )
+
+        if "sim_df" in locals():
         return sim_df
     else:
         return None
diff --git a/aztec_gddt/helper.py b/aztec_gddt/helper.py
index 60a491b..9a125d7 100644
--- a/aztec_gddt/helper.py
+++ b/aztec_gddt/helper.py
@@ -106,8 +106,10 @@ def value_from_param_timeseries_suf(
 ):
     time_series = params[param_key]
 
-    if state["timestep"] < len(time_series):
-        value = time_series[state["timestep"]]
-    else:
-        value = time_series[-1]
-    return value
+    assert state["time_l1"] < len(
+        time_series
+    ), "time_l1 {} is out of bounds for the {} time series".format(
+        state["time_l1"], param_key
+    )
+
+    return time_series[state["time_l1"]]
diff --git a/aztec_gddt/logic_functions/functional_parameterizations.py b/aztec_gddt/logic_functions/functional_parameterizations.py
new file mode 100644
index 0000000..723a97a
--- /dev/null
+++ b/aztec_gddt/logic_functions/functional_parameterizations.py
@@ -0,0 +1,83 @@
+from typing import Tuple
+
+
+def determine_profitability(phase: str, params: dict) -> Tuple[float, float, float]:
+    """Determine expected profitability for an agent in the given phase.
+
+    Args:
+        phase (str): The phase this function is being called in
+        params (dict): The system parameters
+
+    Returns:
+        Tuple[float, float, float]: The expected rewards, the expected costs, and the payoff (rewards minus costs)
+    """
+    if params["fp_determine_profitability"] == "Always Pass":
+        return determine_profitability_always_pass(phase, params)
+    else:
+        assert (
+            False
+        ), "Invalid value {} for fp_determine_profitability".format(
+            params["fp_determine_profitability"]
+        )
+
+
+def determine_profitability_always_pass(
+    phase: str, params: dict
+) -> Tuple[float, float, float]:
+    if phase == "Reveal Content":
+        # expected_rewards = params['daily_block_reward']
+        # expected_rewards *= rewards_to_sequencer(params)
+        # expected_rewards /= expected_l2_blocks_per_day
+        expected_rewards = 1  # XXX: Temporary to ignore economic assumptions.
+        assert (
+            expected_rewards >= 0
+        ), "REVEAL_CONTENT: Expected rewards should be non-negative."
+
+        # expected_costs: float = params["op_cost_sequencer"]
+        # expected_costs += fee
+        # expected_costs += SAFETY_BUFFER
+        # expected_costs *= params['gwei_to_tokens']
+        expected_costs = 0  # XXX: Temporary to ignore economic assumptions.
+        assert expected_costs == 0, "REVEAL_CONTENT: Expected costs should be zero."
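+        # NOTE: under "Always Pass" the payoff below is 1 - 0 = 1, so the
+        # downstream profitability gate always passes for this phase.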
+
+        payoff_reveal = expected_rewards - expected_costs
+        return expected_rewards, expected_costs, payoff_reveal
+    elif phase == "Submit Proof":
+        # expected_rewards = params['daily_block_reward']
+        # expected_rewards *= params['rewards_to_provers']
+        # expected_rewards /= expected_l2_blocks_per_day
+        expected_rewards = 1
+        assert (
+            expected_rewards >= 0
+        ), "SUBMIT PROOF: Expected rewards should be non-negative."
+
+        # expected_costs: float = params["op_cost_prover"]
+        # expected_costs += fee
+        # expected_costs += SAFETY_BUFFER
+        # expected_costs *= params['gwei_to_tokens']
+        expected_costs = 0
+        assert expected_costs == 0, "SUBMIT PROOF: Expected costs should be zero."
+
+        payoff_reveal = expected_rewards - expected_costs
+        return expected_rewards, expected_costs, payoff_reveal
+    elif phase == "Commit Bond":
+        # expected_rewards = params['daily_block_reward']
+        # expected_rewards *= rewards_to_sequencer(params)
+        # expected_rewards /= expected_l2_blocks_per_day
+        expected_rewards = 1  # XXX: Temporary to ignore economic assumptions.
+        assert expected_rewards > 0, "COMMIT_BOND: Expected rewards should be positive."
+
+        # expected_costs: float = params["op_cost_sequencer"]
+        # expected_costs += fee
+        # expected_costs += SAFETY_BUFFER
+        # expected_costs *= params['gwei_to_tokens']
+        expected_costs = 0  # XXX: Temporary to ignore economic assumptions.
+        assert expected_costs == 0, "COMMIT_BOND: Expected costs should be zero."
+
+        payoff_reveal = expected_rewards - expected_costs
+        return expected_rewards, expected_costs, payoff_reveal
+
+    else:
+        assert False, "Not implemented for phase {}".format(phase)
diff --git a/aztec_gddt/logic_functions/phases.py b/aztec_gddt/logic_functions/phases.py
index 1127249..d48dac5 100644
--- a/aztec_gddt/logic_functions/phases.py
+++ b/aztec_gddt/logic_functions/phases.py
@@ -4,6 +4,7 @@ from copy import deepcopy, copy
 from random import choice
 from scipy.stats import uniform
 
+from .functional_parameterizations import determine_profitability
 
 
 def s_gas_fee_l1(p: AztecModelParams, _2, _3, s, _5):
@@ -204,21 +205,9 @@ def p_commit_bond(
         params
     )
 
-    # expected_rewards = params['daily_block_reward']
-    # expected_rewards *= rewards_to_sequencer(params)
-    # expected_rewards /= expected_l2_blocks_per_day
-    expected_rewards = 1  # XXX: Temporary to ignore economic assumptions.
-    assert expected_rewards > 0, "COMMIT_BOND: Expected rewards should be positive."
-
-    # expected_costs: float = params["op_cost_sequencer"]
-    # expected_costs += fee
-    # expected_costs += SAFETY_BUFFER
-    # expected_costs *= params['gwei_to_tokens']
-    expected_costs = 0  # XXX: Temporary to ignore economic assumptions.
-    assert expected_costs == 0, "COMMIT_BOND: Expected costs should be 0."
-
-    payoff_reveal = expected_rewards - expected_costs
-    assert payoff_reveal >= 0, "COMMIT_BOND: Payoff should not be negative."
+ expected_rewards, expected_costs, payoff_reveal = determine_profitability( + "Commit Bond", params + ) if payoff_reveal >= 0: @@ -229,19 +218,10 @@ def p_commit_bond( params["final_probability"], ) ) - # gas_fee_l1_acceptable = ( - # state["gas_fee_l1"] <= params["gas_threshold_for_tx"] - # ) - - gas_fee_l1_acceptable = True # XXX: Temporary economic assumption block_is_uncensored = check_for_censorship(params, state) - if ( - agent_decides_to_reveal_commit_bond - and gas_fee_l1_acceptable - and block_is_uncensored - ): + if agent_decides_to_reveal_commit_bond and block_is_uncensored: updated_process = copy(process) lead_seq: Agent = state["agents"][process.leading_sequencer] proposal_uuid = process.tx_winning_proposal @@ -348,29 +328,11 @@ def p_reveal_content( "l1_blocks_per_day" ] / total_phase_duration(params) - # expected_rewards = params['daily_block_reward'] - # expected_rewards *= rewards_to_sequencer(params) - # expected_rewards /= expected_l2_blocks_per_day - expected_rewards = 1 # XXX: Temporary to ignore economic assumptions. - assert ( - expected_rewards >= 0 - ), "REVEAL_CONTENT: Expected rewards should be positive." - - # expected_costs: float = params["op_cost_sequencer"] - # expected_costs += fee - # expected_costs += SAFETY_BUFFER - # expected_costs *= params['gwei_to_tokens'] - expected_costs = 0 # XXX: Temporary to ignore economic assumptions. - assert ( - expected_costs == 0 - ), "REVEAL_CONTENT: Expected costs should be zero." - - payoff_reveal = expected_rewards - expected_costs + expected_rewards, expected_costs, payoff_reveal = ( + determine_profitability("Reveal Content", params) + ) agent_expects_profit = payoff_reveal >= 0 - assert ( - agent_expects_profit - ), "REVEAL_CONTENT: Agent should be expecting profit." agent_decides_to_reveal_block_content = bernoulli_trial( probability=trial_probability( @@ -379,25 +341,11 @@ def p_reveal_content( ) ) - # gas_fee_blob_acceptable = ( - # state["gas_fee_blob"] <= params["blob_gas_threshold_for_tx"] - # ) - - gas_fee_blob_acceptable = True - - # gas_fee_l1_acceptable = ( - # state["gas_fee_l1"] <= params["gas_threshold_for_tx"] - # ) - - gas_fee_l1_acceptable = True - block_is_uncensored = check_for_censorship(params, state) if ( agent_expects_profit and agent_decides_to_reveal_block_content - and gas_fee_blob_acceptable - and gas_fee_l1_acceptable and block_is_uncensored ): updated_process = copy(process) @@ -495,27 +443,10 @@ def p_submit_proof( "l1_blocks_per_day" ] / total_phase_duration(params) - # expected_rewards = params['daily_block_reward'] - # expected_rewards *= params['rewards_to_provers'] - # expected_rewards /= expected_l2_blocks_per_day - expected_rewards = 1 - assert ( - expected_rewards >= 0 - ), "SUBMIT PROOF: Expected rewards should be positive." - - # expected_costs: float = params["op_cost_prover"] - # expected_costs += fee - # expected_costs += SAFETY_BUFFER - # expected_costs *= params['gwei_to_tokens'] - expected_costs = 0 - assert ( - expected_costs == 0 - ), "SUBMIT PROOF: Expected costs should be zero." - - payoff_reveal = expected_rewards - expected_costs - + expected_rewards, expected_costs, payoff_reveal = ( + determine_profitability("Submit Proof", params) + ) agent_expects_profit = payoff_reveal >= 0 - assert agent_expects_profit, "SUBMIT_PROOF: Agent should expect profit." 
agent_decides_to_reveal_rollup_proof = bernoulli_trial( probability=trial_probability( @@ -524,16 +455,10 @@ def p_submit_proof( ) ) - # gas_fee_l1_acceptable = ( - # state["gas_fee_l1"] <= params["gas_threshold_for_tx"] - # ) - gas_fee_l1_acceptable = True # XXX: Assume gas fee is acceptable. - block_is_uncensored = check_for_censorship(params, state) if ( agent_decides_to_reveal_rollup_proof - and gas_fee_l1_acceptable and agent_expects_profit and block_is_uncensored ): @@ -699,15 +624,9 @@ def s_transactions_new_proposals( size = params["tx_estimators"].proposal_average_size(state) public_share = 0.5 # Assumption: Share of public function calls - # gas_fee_l1_acceptable = ( - # state["gas_fee_l1"] <= params["gas_threshold_for_tx"] - # ) - - gas_fee_l1_acceptable = True # XXX: Temporary economic assumption - block_is_uncensored = check_for_censorship(params, state) - if gas_fee_l1_acceptable and block_is_uncensored: + if block_is_uncensored: new_proposal = Proposal( who=potential_proposer, when=state["time_l1"], diff --git a/aztec_gddt/params.py b/aztec_gddt/params.py index fea77a4..05a74f3 100644 --- a/aztec_gddt/params.py +++ b/aztec_gddt/params.py @@ -39,7 +39,7 @@ is_prover=False, is_relay=False, staked_amount=0.0, # unit: Tokens - ) + ), ] @@ -59,8 +59,7 @@ for i in range(N_INITIAL_AGENTS) ] -INITIAL_AGENTS_DICT: dict[AgentUUID, Agent] = { - a.uuid: a for a in INITIAL_AGENTS} +INITIAL_AGENTS_DICT: dict[AgentUUID, Agent] = {a.uuid: a for a in INITIAL_AGENTS} AGENTS_DICT = {**BASE_AGENTS_DICT, **INITIAL_AGENTS_DICT} @@ -103,10 +102,10 @@ cumm_fee_cashback=INITIAL_CUMM_CASHBACK, cumm_burn=INITIAL_CUMM_BURN, token_supply=INITIAL_SUPPLY, - is_censored=False + is_censored=False, ) -INITIAL_STATE['token_supply'] = TokenSupply.from_state(INITIAL_STATE) +INITIAL_STATE["token_supply"] = TokenSupply.from_state(INITIAL_STATE) ############################################################# ## Begin: Steady state gas estimators defined ## @@ -135,17 +134,24 @@ def steady_state_l1_gas_estimate(state: AztecModelState): - if state["timestep"] < len(steady_gas_fee_l1_time_series): - return steady_gas_fee_l1_time_series[state["timestep"]] - else: - return steady_gas_fee_l1_time_series[-1] + assert state["time_l1"] < len( + steady_gas_fee_l1_time_series + ), "The time_l1 of {} is out of bounds for the time series of steady_gas_fee_l1_time_series".format( + state["time_l1"] + ) + + return steady_gas_fee_l1_time_series[state["time_l1"]] def steady_state_blob_gas_estimate(state: AztecModelState): - if state["timestep"] < len(steady_gas_fee_blob_time_series): - return steady_gas_fee_blob_time_series[state["timestep"]] - else: - return steady_gas_fee_blob_time_series[-1] + + assert state["time_l1"] < len( + steady_gas_fee_blob_time_series + ), "The time_l1 of {} is out of bounds for the time series of steady_gas_fee_blob_time_series".format( + state["time_l1"] + ) + + return steady_gas_fee_blob_time_series[state["time_l1"]] ############################################################# @@ -165,10 +171,10 @@ def steady_state_blob_gas_estimate(state: AztecModelState): final_time = floor(0.25 * TIMESTEPS) # Assumption: 25% of timesteps -zero_timeseries = np.zeros(TIMESTEPS) +zero_timeseries = np.zeros(TIMESTEPS * 10) -single_shock_gas_fee_l1_time_series = np.zeros(TIMESTEPS) -single_shock_gas_fee_blob_time_series = np.zeros(TIMESTEPS) +single_shock_gas_fee_l1_time_series = np.zeros(TIMESTEPS * 10) +single_shock_gas_fee_blob_time_series = np.zeros(TIMESTEPS * 10) single_shock_gas_fee_l1_time_series[0:initial_time] 
= steady_gas_fee_l1_time_series[ 0:initial_time @@ -176,8 +182,8 @@ def steady_state_blob_gas_estimate(state: AztecModelState): single_shock_gas_fee_l1_time_series[-final_time:] = steady_gas_fee_l1_time_series[ -final_time: ].copy() -single_shock_gas_fee_l1_time_series[initial_time: TIMESTEPS - final_time] = ( - steady_gas_fee_l1_time_series[initial_time: TIMESTEPS - final_time].copy() +single_shock_gas_fee_l1_time_series[initial_time : TIMESTEPS - final_time] = ( + steady_gas_fee_l1_time_series[initial_time : TIMESTEPS - final_time].copy() + L1_SHOCK_AMOUNT ) @@ -187,9 +193,8 @@ def steady_state_blob_gas_estimate(state: AztecModelState): single_shock_gas_fee_blob_time_series[-final_time:] = steady_gas_fee_blob_time_series[ -final_time: ].copy() -single_shock_gas_fee_blob_time_series[initial_time: TIMESTEPS - final_time] = ( - steady_gas_fee_blob_time_series[initial_time: TIMESTEPS - - final_time].copy() +single_shock_gas_fee_blob_time_series[initial_time : TIMESTEPS - final_time] = ( + steady_gas_fee_blob_time_series[initial_time : TIMESTEPS - final_time].copy() + L1_SHOCK_AMOUNT ) @@ -222,8 +227,8 @@ def steady_state_blob_gas_estimate(state: AztecModelState): intermit_shock_gas_fee_l1_time_series[-final_time:] = steady_gas_fee_l1_time_series[ -final_time: ].copy() -intermit_shock_gas_fee_l1_time_series[initial_time: TIMESTEPS - final_time] = ( - steady_gas_fee_l1_time_series[initial_time: TIMESTEPS - final_time].copy() +intermit_shock_gas_fee_l1_time_series[initial_time : TIMESTEPS - final_time] = ( + steady_gas_fee_l1_time_series[initial_time : TIMESTEPS - final_time].copy() + L1_INTER_SHOCK_SIGNAL ) @@ -257,7 +262,7 @@ def steady_state_blob_gas_estimate(state: AztecModelState): commitment_bond=lambda _: 100_000, # type: ignore content_reveal=lambda _: 81_000, # type: ignore content_reveal_blob=lambda _: 500_000, # type: ignore - rollup_proof=lambda _: 700_000 # type: ignore + rollup_proof=lambda _: 700_000, # type: ignore ) @@ -271,71 +276,82 @@ def steady_state_blob_gas_estimate(state: AztecModelState): ALWAYS_FALSE_SERIES = {i: False for i in range(0, L1_TIME_SERIES_SIZE)} -def build_censor_series_from_role(data: pd.DataFrame, - role: str, - censor_list: list[str], - start_time: int, - num_timesteps: int = 1000, - start_time_is_block_no: bool = False) -> dict[L1Blocks, bool]: +def build_censor_series_from_role( + data: pd.DataFrame, + role: str, + censor_list: list[str], + start_time: int, + num_timesteps: int = 1000, + start_time_is_block_no: bool = False, +) -> dict[L1Blocks, bool]: # XXX: this assumes that the DataFrame has a unique, non-missing measurement # for each L1 time. # L1 Time. 
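    # Returns a mapping {l1_block_index: bool}; True marks blocks whose
    # actor for the given `role` (e.g. builder) appears in `censor_list`.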
- sorted_data = data.sort_values(by='date') + sorted_data = data.sort_values(by="date") if censor_list is None: censor_list = [] if start_time_is_block_no: - relevant_df = sorted_data.query(f"(block_number >= {start_time}) & (block_number < {start_time + num_timesteps})") - censored_list = relevant_df[role].apply( - lambda x: x in censor_list).to_list() - - index_range_to_use: List[int] = [x for x in range( - 0, 0 + num_timesteps)] - + relevant_df = sorted_data.query( + f"(block_number >= {start_time}) & (block_number < {start_time + num_timesteps})" + ) + censored_list = relevant_df[role].apply(lambda x: x in censor_list).to_list() + + index_range_to_use: List[int] = [x for x in range(0, 0 + num_timesteps)] + censored_dict = dict(zip(index_range_to_use, censored_list)) else: - index_range_to_use: List[int] = [x for x in range( - start_time, start_time + num_timesteps)] + index_range_to_use: List[int] = [ + x for x in range(start_time, start_time + num_timesteps) + ] indexed_data: pd.DataFrame = sorted_data.iloc[index_range_to_use] - censored_list: list[bool] = indexed_data[role].apply( - lambda x: x in censor_list).to_list() + censored_list: list[bool] = ( + indexed_data[role].apply(lambda x: x in censor_list).to_list() + ) censored_dict = dict(zip(index_range_to_use, censored_list)) return censored_dict -def build_censor_params(data: pd.DataFrame, - censoring_builders: List[str], - censoring_validators: List[str], - start_time: int, - num_timesteps: int = 1000): +def build_censor_params( + data: pd.DataFrame, + censoring_builders: List[str], + censoring_validators: List[str], + start_time: int, + num_timesteps: int = 1000, +): practical_num_timesteps = L1_BUFFER * num_timesteps if start_time is None: data_length = len(data) - start_time = random.randint( - 0, data_length - (practical_num_timesteps + 1)) + start_time = random.randint(0, data_length - (practical_num_timesteps + 1)) # XXX: Currently doubling number of timesteps due to weird out-of-range errors on long runs. 
-    censorship_builder_data: dict[L1Blocks, bool] = build_censor_series_from_role(data=data,
-                                                                                  censor_list=censoring_builders,
-                                                                                  start_time=start_time,
-                                                                                  num_timesteps=practical_num_timesteps,
-                                                                                  role='builder')
-    censorship_validator_data: dict[L1Blocks, bool] = build_censor_series_from_role(data=data,
-                                                                                    censor_list=censoring_validators,
-                                                                                    start_time=start_time,
-                                                                                    num_timesteps=practical_num_timesteps,
-                                                                                    role='validator')
+    censorship_builder_data: dict[L1Blocks, bool] = build_censor_series_from_role(
+        data=data,
+        censor_list=censoring_builders,
+        start_time=start_time,
+        num_timesteps=practical_num_timesteps,
+        role="builder",
+    )
+    censorship_validator_data: dict[L1Blocks, bool] = build_censor_series_from_role(
+        data=data,
+        censor_list=censoring_validators,
+        start_time=start_time,
+        num_timesteps=practical_num_timesteps,
+        role="validator",
+    )
 
-    censorship_info_dict = {"censorship_series_builder": [censorship_builder_data],
-                            "censorship_series_validator": [censorship_validator_data]}
+    censorship_info_dict = {
+        "censorship_series_builder": [censorship_builder_data],
+        "censorship_series_validator": [censorship_validator_data],
+    }
 
     return censorship_info_dict
 
@@ -368,7 +384,6 @@ def build_censor_params(data: pd.DataFrame,
     phase_duration_rollup_max_blocks=3,  # Assumption
     phase_duration_race_min_blocks=0,  # Assumption
     phase_duration_race_max_blocks=2,  # Assumption
-
     stake_activation_period=40,  # Assumption: Currently not impactful
     unstake_cooldown_period=40,  # Assumption: Currently not impactful
     # Behavioral Parameters
@@ -379,15 +394,12 @@ def build_censor_params(data: pd.DataFrame,
     blob_gas_threshold_for_tx=250,  # Assumption
     # Assumption: Global Probability, could instantiate agents with [0, 1]
     proving_marketplace_usage_probability=0.7,
-
     rewards_to_provers=0.3,  # Assumption: Reward Share
     rewards_to_relay=0.01,  # Assumption: Reward Share
-
     # Initial Assumption: No Censorship
     censorship_series_builder=ALWAYS_FALSE_SERIES,
     # Initial Assumption: No Censorship
     censorship_series_validator=ALWAYS_FALSE_SERIES,
-
     gwei_to_tokens=1e-9,
     gas_estimators=DEFAULT_DETERMINISTIC_GAS_ESTIMATOR,
     tx_estimators=DEFAULT_DETERMINISTIC_TX_ESTIMATOR,
@@ -402,5 +414,6 @@ def build_censor_params(data: pd.DataFrame,
     safety_factor_commit_bond=0.0,
     safety_factor_reveal_content=0.0,
     safety_factor_rollup_proof=0.0,
-    past_gas_weight_fraction=0.9
+    past_gas_weight_fraction=0.9,
+    fp_determine_profitability="Always Pass",
 )
diff --git a/documentation-obsidian/.obsidian/workspace.json b/documentation-obsidian/.obsidian/workspace.json
index 580bfd5..68f0fbc 100644
--- a/documentation-obsidian/.obsidian/workspace.json
+++ b/documentation-obsidian/.obsidian/workspace.json
@@ -15,8 +15,8 @@
       "state": {
         "file": "Sub-Canvases/PSUBs.canvas",
         "viewState": {
-          "x": 720.1211501549461,
-          "y": 510.4915994447455,
+          "x": 675.5923833630928,
+          "y": -2012.8051854269424,
           "zoom": -1.4114381295266551
         }
       }
diff --git a/notebooks/test_run.ipynb b/notebooks/test_run.ipynb
index 242aa7f..79f6245 100644
--- a/notebooks/test_run.ipynb
+++ b/notebooks/test_run.ipynb
@@ -32,17 +32,13 @@
    "metadata": {},
    "outputs": [
     {
-     "ename": "ImportError",
-     "evalue": "cannot import name 'Concatenate' from 'typing' (/Users/seanmcowen/opt/anaconda3/envs/BlockScience/lib/python3.9/typing.py)",
-     "output_type": "error",
-     "traceback": [
-      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-      "\u001b[0;31mImportError\u001b[0m                               Traceback (most recent call last)",
-      
"\u001b[0;32m/var/folders/y0/fwkpk2ps087b_2qxvhjstrfr0000gn/T/ipykernel_79155/869609700.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0maztec_gddt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mexperiment\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mstandard_run\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0msim_df\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstandard_run\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Dropbox/BlockScience/aztec-gddt/notebooks/../aztec_gddt/__init__.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0maztec_gddt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSINGLE_RUN_PARAMS\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mINITIAL_STATE\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTIMESTEPS\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSAMPLES\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0maztec_gddt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstructure\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAZTEC_MODEL_BLOCKS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mlogging\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mDEFAULT_LOGGER\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m'aztec-design-digital-twin'\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Dropbox/BlockScience/aztec-gddt/notebooks/../aztec_gddt/params.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0maztec_gddt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtypes\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0muuid\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0muuid4\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mscipy\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstats\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnorm\u001b[0m \u001b[0;31m# type: ignore\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpandas\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mpd\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Dropbox/BlockScience/aztec-gddt/notebooks/../aztec_gddt/types.py\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtyping\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAnnotated\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mDict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mTypedDict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mNamedTuple\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mOptional\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0;32mfrom\u001b[0m 
\u001b[0mtyping\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mAny\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mCallable\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mConcatenate\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mParamSpec\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSequence\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0menum\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mIntEnum\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEnum\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mauto\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmath\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mfloor\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'Concatenate' from 'typing' (/Users/seanmcowen/opt/anaconda3/envs/BlockScience/lib/python3.9/typing.py)" + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/seanmcowen/opt/anaconda3/envs/Aztec/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning:\n", + "\n", + "IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + "\n" ] } ], @@ -807,7 +803,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.12" + "version": "3.11.8" } }, "nbformat": 4,
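
For reference, a minimal usage sketch of the new `determine_profitability` dispatch introduced in `functional_parameterizations.py`; the `params` dict below is illustrative and carries only the `fp_determine_profitability` key that the "Always Pass" branch reads:

```python
# Minimal sketch, assuming this diff is applied to aztec_gddt.
from aztec_gddt.logic_functions.functional_parameterizations import (
    determine_profitability,
)

# Illustrative params: only the key consulted by the dispatch is set here;
# a real run passes the full SINGLE_RUN_PARAMS dict.
params = {"fp_determine_profitability": "Always Pass"}

for phase in ("Commit Bond", "Reveal Content", "Submit Proof"):
    rewards, costs, payoff = determine_profitability(phase, params)
    # Under "Always Pass": rewards == 1, costs == 0, payoff == 1 for every phase.
    print(f"{phase}: rewards={rewards}, costs={costs}, payoff={payoff}")
```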