[research/lotterysim] update crawlers, re-tune controllers for better accuracy and stable APR

Author: ertosns
Date: 2023-08-01 18:44:01 +03:00
parent 77e5b65a80
commit 2a0a9d84b8
5 changed files with 29 additions and 27 deletions

File 1 of 5

@@ -39,9 +39,9 @@ PRIMARY_REWARD_TARGET = 0.33 # staked ratio
 # secondary controller assumes certain frequency of leaders per slot
 SECONDARY_LEAD_TARGET = 1 #number of lead per slot
 # maximum transaction size
-MAX_BLOCK_SIZE = 1000
+MAX_BLOCK_SIZE = 100
 # maximum transaction computational cost
-MAX_BLOCK_CC = 100
+MAX_BLOCK_CC = 10
 # fee controller computational capacity target
 FEE_TARGET = MAX_BLOCK_CC
 # max fee base value
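For context, these constants feed the fee controller that this commit retunes: it drives per-block computational cost toward FEE_TARGET. Below is a minimal sketch of a discrete PI step under that reading; the FeeController class and step() method are illustrative names, not the simulator's real API, and only the gains (fee_kp, fee_ki from this commit) and the targets above come from the diff.

# Minimal sketch of a discrete PI fee controller (assumed form; the
# class and method names are illustrative, not the simulator's API).
MAX_BLOCK_CC = 10          # per-block computational cost cap (this commit)
FEE_TARGET = MAX_BLOCK_CC  # controller setpoint

class FeeController:
    def __init__(self, kp=-0.068188, ki=-0.000205):  # gains tuned in this commit
        self.kp, self.ki = kp, ki
        self.integral = 0.0

    def step(self, block_cc):
        # positive error while blocks run under capacity, negative when over
        error = FEE_TARGET - block_cc
        self.integral += error
        # PI output: the base-fee adjustment applied for the next slot
        return self.kp * error + self.ki * self.integral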

File 2 of 5

@@ -16,7 +16,9 @@ if __name__ == "__main__":
     mu = PREMINT/NODES
     darkies = [Darkie(random.gauss(mu, mu/10), strategy=random_strategy(EPOCH_LENGTH)) for _ in range(NODES)]
     #dt = DarkfiTable(0, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-2.53, r_ki=29.5, r_kd=53.77)
-    dt = DarkfiTable(PREMINT, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-0.719, r_ki=1.6, r_kd=0.1, fee_kp=-0.068188, fee_ki=-0.000205)
+    #dt = DarkfiTable(PREMINT, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-0.719, r_ki=1.6, r_kd=0.1, fee_kp=-0.068188, fee_ki=-0.000205)
+    #dt = DarkfiTable(PREMINT, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=-0.010399999999938556, ki=-0.0365999996461878, r_kp=0.229, r_ki=2.419, fee_kp=-0.068188, fee_ki=-0.000205)
+    dt = DarkfiTable(PREMINT, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=0.0259, ki=-0.0319, r_kp=0.229, r_ki=2.419, fee_kp=-0.068188, fee_ki=-0.000205)
     for darkie in darkies:
         dt.add_darkie(darkie)
     acc, cc_acc, avg_apy, avg_reward, stake_ratio, avg_apr = dt.background(rand_running_time=False)
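The retuned call carries three gain sets: kp/ki for one controller, r_kp/r_ki presumably for the reward controller, and fee_kp/fee_ki for the fee controller (the prefixes suggest the mapping; only the fee gains are named explicitly in the config). Relying on the script's own imports, the entry point condenses to the sketch below; the final print line is illustrative.

# Condensed sketch of the retuned run above; the controller-role
# comments are a presumed mapping and the print line is illustrative.
mu = PREMINT / NODES
darkies = [Darkie(random.gauss(mu, mu / 10), strategy=random_strategy(EPOCH_LENGTH))
           for _ in range(NODES)]
dt = DarkfiTable(PREMINT, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE,
                 kp=0.0259, ki=-0.0319,               # lead controller gains
                 r_kp=0.229, r_ki=2.419,              # reward controller gains (presumed)
                 fee_kp=-0.068188, fee_ki=-0.000205)  # fee controller gains
for darkie in darkies:
    dt.add_darkie(darkie)
acc, cc_acc, avg_apy, avg_reward, stake_ratio, avg_apr = dt.background(rand_running_time=False)
print('acc: {}, cc_acc: {}, avg(apr): {}, staked: {}'.format(acc, cc_acc, avg_apr, stake_ratio))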

File 3 of 5

@@ -6,7 +6,7 @@ from tqdm import tqdm
 import os
 from core.strategy import random_strategy
-AVG_LEN = 10
+AVG_LEN = 5
 KP_STEP=0.01
 KP_SEARCH=-0.63
@@ -14,7 +14,7 @@ KP_SEARCH=-0.63
 KI_STEP=0.01
 KI_SEARCH=3.35
-RUNNING_TIME=5000
+RUNNING_TIME=1000
 NODES = 1000
 SHIFTING = 0.05
@@ -30,7 +30,6 @@ KI='ki'
 KP_RANGE_MULTIPLIER = 2
 KI_RANGE_MULTIPLIER = 2
 highest_gain = (KP_SEARCH, KI_SEARCH)
 parser = ArgumentParser()
@@ -45,13 +44,13 @@ rand_running_time = args.rand_running_time
 debug = args.debug
 def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, rkp=0, rki=0, distribution=[], hp=True):
-    dt = DarkfiTable(0, RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0, r_kp=rkp, r_ki=rki, r_kd=0)
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
+    dt = DarkfiTable(sum([distribution[i] for i in range(RND_NODES)]), RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0, r_kp=rkp, r_ki=rki, r_kd=0, fee_kp=-0.068188, fee_ki=-0.000205)
     for idx in range(0,RND_NODES):
         darkie = Darkie(distribution[idx], strategy=random_strategy(EPOCH_LENGTH))
         dt.add_darkie(darkie)
-    acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
-    return acc, apy, reward, stake_ratio, apr
+    acc, cc_acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
+    return acc, cc_acc, apy, reward, stake_ratio, apr
 def multi_trial_exp(kp, ki, distribution = [], hp=True):
     global highest_apr
@@ -61,21 +60,24 @@ def multi_trial_exp(kp, ki, distribution = [], hp=True):
     global lowest_apr2target_diff
     new_record=False
     accs = []
+    cc_accs = []
+    aprs = []
     rewards = []
     stakes_ratios = []
-    aprs = []
     for i in range(0, AVG_LEN):
-        acc, apy, reward, stake_ratio, apr = experiment(CONTROLLER_TYPE_DISCRETE, rkp=kp, rki=ki, distribution=distribution, hp=hp)
+        acc, cc_acc, apy, reward, stake_ratio, apr = experiment(CONTROLLER_TYPE_DISCRETE, rkp=kp, rki=ki, distribution=distribution, hp=hp)
         accs += [acc]
+        cc_accs += [cc_acc]
         rewards += [reward]
         aprs += [apr]
         stakes_ratios += [stake_ratio]
     avg_acc = float(sum(accs))/AVG_LEN
+    avg_cc_acc = float(sum(cc_accs))/AVG_LEN
     avg_reward = float(sum(rewards))/AVG_LEN
     avg_staked = float(sum(stakes_ratios))/AVG_LEN
     avg_apr = float(sum(aprs))/AVG_LEN
-    buff = 'avg(acc): {}, avg(apr): {}, avg(reward): {}, avg(stake ratio): {}, kp: {}, ki:{}, '.format(avg_acc, avg_apr, avg_reward, avg_staked, kp, ki)
+    buff = 'avg(acc): {}, avg(cc_accs): {}, avg(apr): {}, avg(reward): {}, avg(stake ratio): {}, kp: {}, ki:{}, '.format(avg_acc, avg_cc_acc, avg_apr, avg_reward, avg_staked, kp, ki)
     if avg_apr > 0:
         gain = (kp, ki)
         acc_gain = (avg_apr, gain)
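With cc_acc in the mix, each candidate gain pair is scored by averaging all six background() metrics over AVG_LEN runs. The loop above is equivalent to the compact sketch below; average_metrics is an illustrative name, experiment() is the function from this file.

# Compact sketch of the AVG_LEN-trial scoring with cc_acc included;
# average_metrics is an illustrative name, experiment() is as above.
def average_metrics(kp, ki, distribution, trials=AVG_LEN):
    totals = [0.0] * 6
    for _ in range(trials):
        run = experiment(CONTROLLER_TYPE_DISCRETE, rkp=kp, rki=ki,
                         distribution=distribution)
        totals = [t + r for t, r in zip(totals, run)]
    # order matches experiment(): acc, cc_acc, apy, reward, stake_ratio, apr
    return [t / trials for t in totals]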
@@ -116,8 +118,7 @@ def crawler(crawl, range_multiplier, step=0.1):
         step*=10
     np.random.shuffle(crawl_range)
     crawl_range = tqdm(crawl_range)
-    #distribution = [random.gauss(ERC20DRK/NODES, ERC20DRK/NODES*0.1) for i in range(NODES)]
-    distribution = [0 for i in range(NODES)]
+    distribution = [random.gauss(ERC20DRK/NODES, ERC20DRK/NODES*0.1) for i in range(NODES)]
     for i in crawl_range:
         kp = i if crawl==KP else highest_gain[0]
         ki = i if crawl==KI else highest_gain[1]
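The crawler now scores candidates against a Gaussian ERC20DRK stake distribution instead of the previous all-zero placeholder, and each pass is a shuffled one-dimensional sweep that holds the other gain at its best known value. One pass reduces to the sketch below; crawl_axis is an illustrative name and the loop body mirrors the diff above.

# Sketch of one crawl pass: shuffle the candidates, pin the other gain
# to the best known value, and score each candidate by multi-trial
# average. crawl_axis is an illustrative name.
distribution = [random.gauss(ERC20DRK / NODES, ERC20DRK / NODES * 0.1)
                for _ in range(NODES)]

def crawl_axis(crawl, crawl_range):
    np.random.shuffle(crawl_range)          # randomize sweep order
    for i in tqdm(crawl_range):
        kp = i if crawl == KP else highest_gain[0]
        ki = i if crawl == KI else highest_gain[1]
        multi_trial_exp(kp, ki, distribution=distribution)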

File 4 of 5

@@ -3,13 +3,13 @@ from core.lottery import DarkfiTable
 from core.utils import *
 from core.darkie import Darkie
 from tqdm import tqdm
-from core.strategy import SigmoidStrategy
+from core.strategy import *
 import os
 AVG_LEN = 5
 KP_STEP=0.01
-KP_SEARCH= -0.01
+KP_SEARCH=-0.01
 KI_STEP=0.01
 KI_SEARCH=-0.036
@@ -17,11 +17,10 @@ KI_SEARCH=-0.036
 KD_STEP=0.01
 KD_SEARCH=0.0384
 EPSILON=0.0001
-RUNNING_TIME=10000
+RUNNING_TIME=1000
 NODES = 1000
-highest_acc = 0
+highest_acc = 0.2
 KP='kp'
 KI='ki'
@@ -45,12 +44,12 @@ rand_running_time = args.rand_running_time
 debug = args.debug
 def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, kp=0, ki=0, kd=0, distribution=[], hp=True):
-    dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=kd)
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
+    dt = DarkfiTable(sum([distribution[i] for i in range(RND_NODES)]), RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=kd)
     for idx in range(0,RND_NODES):
-        darkie = Darkie(distribution[idx], strategy=SigmoidStrategy(EPOCH_LENGTH))
+        darkie = Darkie(distribution[idx], strategy=random_strategy(EPOCH_LENGTH))
         dt.add_darkie(darkie)
-    acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
+    acc, cc_acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
     return acc
 def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
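Both accuracy crawlers now build a fresh randomized population per trial: an optional random node count, a random per-node strategy, and a table premint equal to the stake actually drawn. Under the script's own imports, the setup reduces to the sketch below; make_table is an illustrative name and the calls match the diff.

# Sketch of the randomized-population setup used by the crawlers;
# make_table is an illustrative name, the calls match the diff above.
def make_table(distribution, kp, ki, kd=0):
    rnd_nodes = random.randint(5, NODES) if randomize_nodes else NODES
    premint = sum(distribution[:rnd_nodes])   # premint equals drawn stake
    dt = DarkfiTable(premint, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE,
                     kp=kp, ki=ki, kd=kd)
    for idx in range(rnd_nodes):
        dt.add_darkie(Darkie(distribution[idx],
                             strategy=random_strategy(EPOCH_LENGTH)))
    return dt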

File 5 of 5

@@ -3,7 +3,7 @@ from core.lottery import DarkfiTable
 from core.utils import *
 from core.darkie import Darkie
 from tqdm import tqdm
-from core.strategy import SigmoidStrategy
+from core.strategy import *
 import os
 AVG_LEN = 5
@@ -18,7 +18,7 @@ EPSILON=0.0001
 RUNNING_TIME=1000
 NODES = 1000
-highest_acc = 0
+highest_acc = 0.2
 KP='kp'
 KI='ki'
@@ -39,13 +39,13 @@ randomize_nodes = args.randomize_nodes
 rand_running_time = args.rand_running_time
 debug = args.debug
-def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, kp=0, ki=0, distribution=[], hp=True):
-    dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=0)
+def experiment(controller_type, kp, ki, distribution=[], hp=True):
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
+    dt = DarkfiTable(sum([distribution[i] for i in range(RND_NODES)]), RUNNING_TIME, controller_type, kp=kp, ki=ki)
     for idx in range(0,RND_NODES):
-        darkie = Darkie(distribution[idx], strategy=SigmoidStrategy(EPOCH_LENGTH))
+        darkie = Darkie(distribution[idx], strategy=random_strategy(EPOCH_LENGTH))
         dt.add_darkie(darkie)
-    acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
+    acc, cc_acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
     return acc
 def multi_trial_exp(kp, ki, distribution = [], hp=True):
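Seeding highest_acc at 0.2 instead of 0 acts as a quality floor: the crawler only starts recording gain pairs once a trial average beats 20% accuracy. Below is a sketch of that gating under the usual record-keeping pattern; maybe_record is an illustrative name, not a function from the scripts.

highest_acc = 0.2  # floor: gain pairs are ignored until they beat 20% accuracy

# Sketch of the presumed record gating; maybe_record is illustrative.
def maybe_record(avg_acc, kp, ki):
    global highest_acc, highest_gain
    if avg_acc > highest_acc:  # only accept new records above the floor
        highest_acc = avg_acc
        highest_gain = (kp, ki)
        return True
    return False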