[research/lotterysim] add base fee controller crawler for tuning
script/research/lotterysim/basefee_discrete_autocrawler_pi.py (new file, 173 lines)
@@ -0,0 +1,173 @@
# Crawler for tuning the base fee PI controller gains (kp, ki) by coordinate search.
from argparse import ArgumentParser
from core.lottery import DarkfiTable
from core.utils import *
from core.darkie import Darkie
from tqdm import tqdm
import os
from core.strategy import random_strategy

# number of trials averaged per (kp, ki) candidate
AVG_LEN = 10

KP_STEP=0.0001
KP_SEARCH=-0.047919999999999366

KI_STEP=0.0001
KI_SEARCH=-0.00055

RUNNING_TIME=1000
NODES = 1000

SHIFTING = 0.05

highest_apr = 0.05
highest_acc = 0.2
highest_cc_acc = 0.01
highest_staked = 0.3
lowest_apr2target_diff = 1

KP='kp'
KI='ki'

KP_RANGE_MULTIPLIER = 1.1
KI_RANGE_MULTIPLIER = 1.1


highest_gain = (KP_SEARCH, KI_SEARCH)

parser = ArgumentParser()
parser.add_argument('-p', '--high-precision', action='store_true', default=False)
parser.add_argument('-r', '--randomizenodes', action='store_true', default=True)
parser.add_argument('-t', '--rand-running-time', action='store_true', default=True)
parser.add_argument('-d', '--debug', action='store_false')
args = parser.parse_args()
high_precision = args.high_precision
randomize_nodes = args.randomizenodes
rand_running_time = args.rand_running_time
debug = args.debug

def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, fkp=0, fki=0, distribution=[], hp=True):
    # reward controller gains are fixed; only the fee gains (fkp, fki) are swept
    dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=-0.719, r_ki=1.6, r_kd=0.1, fee_kp=fkp, fee_ki=fki)
    RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
    for idx in range(0, RND_NODES):
        darkie = Darkie(distribution[idx], strategy=random_strategy(EPOCH_LENGTH))
        dt.add_darkie(darkie)
    acc, cc_acc, apy, reward, stake_ratio, apr = dt.background(rand_running_time, hp)
    return acc, cc_acc, apy, reward, stake_ratio, apr

def multi_trial_exp(kp, ki, distribution = [], hp=True):
    global highest_apr
    global highest_cc_acc
    global highest_acc
    global highest_staked
    global highest_gain
    global lowest_apr2target_diff
    new_record=False
    accs = []
    aprs = []
    rewards = []
    stakes_ratios = []
    cc_accs = []
    for i in range(0, AVG_LEN):
        acc, cc_acc, apy, reward, stake_ratio, apr = experiment(CONTROLLER_TYPE_DISCRETE, fkp=kp, fki=ki, distribution=distribution, hp=hp)
        accs += [acc]
        cc_accs += [cc_acc]
        rewards += [reward]
        aprs += [apr]
        stakes_ratios += [stake_ratio]
    avg_acc = float(sum(accs))/AVG_LEN
    avg_cc_acc = float(sum(cc_accs))/AVG_LEN if len(cc_accs) else 0
    avg_reward = float(sum(rewards))/AVG_LEN
    avg_staked = float(sum(stakes_ratios))/AVG_LEN
    avg_apr = float(sum(aprs))/AVG_LEN
    buff = 'avg(acc): {}, avg(cc_acc): {}, avg(apr): {}, avg(reward): {}, avg(stake ratio): {}, kp: {}, ki: {}, '.format(avg_acc, avg_cc_acc, avg_apr, avg_reward, avg_staked, kp, ki)

    print('avg_cc_acc: {}'.format(avg_cc_acc))
    # a new record is any candidate whose averaged cc accuracy beats the best seen so far
    if avg_cc_acc > highest_cc_acc:
        #if avg_apr > 0:
        gain = (kp, ki)
        acc_gain = (avg_apr, gain)
        apr2target_diff = math.fabs(avg_apr - float(TARGET_APR))
        #if avg_acc > highest_acc and apr2target_diff < 0.08:
        #if avg_cc_acc > highest_cc_acc:
        new_record = True
        highest_apr = avg_apr
        highest_acc = avg_acc
        highest_cc_acc = avg_cc_acc
        highest_staked = avg_staked
        highest_gain = (kp, ki)
        lowest_apr2target_diff = apr2target_diff
        with open('log'+os.sep+"highest_gain.txt", 'w') as f:
            f.write(buff)
    return buff, new_record

def crawler(crawl, range_multiplier, step=0.1):
    # sweep one gain (kp or ki) around the best value found so far
    start = None
    if crawl==KP:
        start = highest_gain[0]
    elif crawl==KI:
        start = highest_gain[1]

    range_start = (start*range_multiplier if start <=0 else -1*start)
    range_end = (-1*start if start<=0 else range_multiplier*start)
    # if the range holds fewer than 10 steps, widen it and shrink the step
    while (range_end-range_start)/step < 10:
        range_start -= SHIFTING
        range_end += SHIFTING
        step /= 10

    while True:
        try:
            crawl_range = np.arange(range_start, range_end, step)
            break
        except Exception as e:
            print('start: {}, end: {}, step: {}, exp: {}'.format(range_start, range_end, step, e))
            step*=10
    np.random.shuffle(crawl_range)
    crawl_range = tqdm(crawl_range)
    mu = ERC20DRK/NODES
    distribution = [random.gauss(mu, mu/10) for i in range(NODES)]
    for i in crawl_range:
        kp = i if crawl==KP else highest_gain[0]
        ki = i if crawl==KI else highest_gain[1]
        buff, new_record = multi_trial_exp(kp, ki, distribution, hp=high_precision)
        crawl_range.set_description('highest:{} / {}'.format(highest_cc_acc, buff))
        if new_record:
            break

# alternate kp and ki sweeps (coordinate descent), adapting range and step as we go
while True:
    prev_highest_gain = highest_gain
    # kp crawl
    crawler(KP, KP_RANGE_MULTIPLIER, KP_STEP)
    if highest_gain[0] == prev_highest_gain[0]:
        # no improvement: widen the search range and refine the step
        KP_RANGE_MULTIPLIER+=1
        KP_STEP/=10
    else:
        start = highest_gain[0]
        range_start = (start*KP_RANGE_MULTIPLIER if start <=0 else -1*start) - SHIFTING
        range_end = (-1*start if start<=0 else KP_RANGE_MULTIPLIER*start) + SHIFTING
        # cap the sweep at roughly 500 steps by coarsening the step and lowering the multiplier
        while (range_end - range_start)/KP_STEP >500:
            #if KP_STEP < 0.1:
            KP_STEP*=2
            KP_RANGE_MULTIPLIER-=1
            #TODO (res) shouldn't the range also shrink?
            # not always true.
            # how to distinguish between a shrinking range, and a large step?
            # a good strategy is that the step shouldn't be > 0.1
            # the range also should be > 0.8
            # what about the range multiplier?

    # ki crawl
    crawler(KI, KI_RANGE_MULTIPLIER, KI_STEP)
    if highest_gain[1] == prev_highest_gain[1]:
        KI_RANGE_MULTIPLIER+=1
        KI_STEP/=10
    else:
        start = highest_gain[1]
        range_start = (start*KI_RANGE_MULTIPLIER if start <=0 else -1*start) - SHIFTING
        range_end = (-1*start if start<=0 else KI_RANGE_MULTIPLIER*start) + SHIFTING
        while (range_end - range_start)/KI_STEP >500:
            #print('range_end: {}, range_start: {}, ki_step: {}'.format(range_end, range_start, KI_STEP))
            #if KP_STEP < 1:
            KI_STEP*=2
            KI_RANGE_MULTIPLIER-=1
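For context, the (kp, ki) pair found by this crawler feeds FeePID (see the pid diff below), which nudges the base fee toward the block computational-cost target each slot. Below is a minimal, hypothetical sketch of a discrete PI update of that shape, clipped to the FEE_MIN/FEE_MAX bounds; the actual update lives in BasePID in core/pid.py and may differ in form.

# Illustrative sketch only, not the core/pid.py implementation.
# `target`, `fee_min` and `fee_max` stand in for MAX_BLOCK_CC, FEE_MIN and FEE_MAX.
def basefee_pi_step(prev_fee, prev_err, measured_cc, kp, ki, target, fee_min, fee_max):
    err = target - measured_cc                         # distance of the block cc from its target
    fee = prev_fee + kp * (err - prev_err) + ki * err  # velocity-form PI increment
    fee = min(max(fee, fee_min), fee_max)              # clip, as pid_clipped() is assumed to do
    return fee, err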
@@ -47,7 +47,7 @@ FEE_TARGET = MAX_BLOCK_CC
# max fee base value
FEE_MAX = 1
# min fee base value
FEE_MIN = 0.0001
FEE_MIN = 0.00001
# negligible value added to the denominator to avoid division by zero
EPSILON = 1
# window of accuracy calculation
@@ -68,3 +68,4 @@ EPSILON_HP = Num(EPSILON)
REWARD_MIN_HP = Num(REWARD_MIN)
REWARD_MAX_HP = Num(REWARD_MAX)
BASE_L_HP = Num(BASE_L)
CC_DIFF_EPSILON=0.0001

@@ -13,6 +13,7 @@ class Darkie():
        self.strategy = strategy
        self.slot = 0
        self.won_hist = [] # winning history boolean
        self.fees = []

    def clone(self):
        return Darkie(self.stake)
@@ -176,8 +177,13 @@ class Darkie():
    deduct tip paid to miner plus burned base fee or computational cost.
    """
    def pay_fee(self, fee):
        # a negative fee credits the stake (e.g. the lead receiving tips)
        if fee>0:
            self.fees += [fee]
        self.stake -= fee

    def last_fee(self):
        return self.fees[-1] if len(self.fees)>0 else 0

class Tx(object):
    def __init__(self, size):
        self.tx = [random.random() for _ in range(size)]
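A tiny usage sketch of the fee accounting above (hypothetical values; it assumes, as clone() does, that a Darkie can be constructed from a stake alone):

d = Darkie(100.0)
d.pay_fee(0.5)             # tx fee: recorded in d.fees and deducted from the stake
d.pay_fee(-2.0)            # negative fee models the lead being credited with tips
assert d.last_fee() == 0.5
assert d.stake == 101.5    # 100.0 - 0.5 + 2.0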
@@ -18,11 +18,14 @@ class DarkfiTable:
        print('secondary min/max : {}/{}'.format(self.secondary_pid.clip_min, self.secondary_pid.clip_max))
        self.primary_pid = PrimaryDiscretePID(kp=r_kp, ki=r_ki, kd=r_kd) if controller_type==CONTROLLER_TYPE_DISCRETE else PrimaryTakahashiPID(kc=kc, ti=ti, td=td, ts=ts)
        print('primary min/max : {}/{}'.format(self.primary_pid.clip_min, self.primary_pid.clip_max))
        self.basefee_pid = FeePID(kp=fee_kp, ki=fee_ki, kd=fee_kd) if controller_type==CONTROLLER_TYPE_DISCRETE else SecondaryTakahashiPID(kc=fee_kc, ti=fee_ti, td=fee_td, ts=fee_ts)
        self.basefee_pid = FeePID(kp=fee_kp, ki=fee_ki, kd=fee_kd)
        self.debug=debug
        self.rewards = []
        self.winners = [1]
        self.computational_cost = [0]
        self.base_fee = []
        self.tips_avg = []
        self.cc_diff = []

    def add_darkie(self, darkie):
        self.darkies+=[darkie]
@@ -70,8 +73,12 @@ class DarkfiTable:
            is_slashed = self.reward_slash_lead(debug)
            if is_slashed==False:
                self.resolve_fork(slot, debug)

            rt_range.set_description('epoch: {}, fork: {}, winners: {}, issuance {} DRK, acc: {}%, stake: {}%, sr: {}%, reward:{}, apr: {}%, avg(y): {}, avg(T): {}'.format(int(slot/EPOCH_LENGTH), self.merge_length(), self.winners[-1], round(self.Sigma,2), round(acc*100, 2), round(total_stake/self.Sigma*100 if self.Sigma>0 else 0,2), round(self.avg_stake_ratio()*100,2) , round(self.rewards[-1],2), round(self.avg_apr()*100,2), sum(Ys)/len(Ys), sum(Ts)/len(Ts) ))
            avg_y = sum(Ys)/len(Ys)
            avg_t = sum(Ts)/len(Ts)
            avg_tip = self.tips_avg[-1] if len(self.tips_avg)>0 else 0
            base_fee = self.base_fee[-1] if len(self.base_fee)>0 else 0
            cc_diff = self.cc_diff[-1] if len(self.cc_diff)>0 else 0
            rt_range.set_description('epoch: {}, fork: {}, winners: {}, issuance {} DRK, f: {}, acc: {}%, stake: {}%, sr: {}%, reward:{}, apr: {}%, basefee: {}, avg(fee): {}, cc_diff: {}, avg(y): {}, avg(T): {}'.format(int(slot/EPOCH_LENGTH), self.merge_length(), self.winners[-1], round(self.Sigma,2), round(f, 5), round(acc*100, 2), round(total_stake/self.Sigma*100 if self.Sigma>0 else 0,2), round(self.avg_stake_ratio()*100,2) , round(self.rewards[-1],2), round(self.avg_apr()*100,2), round(base_fee, 4), round(avg_tip, 2), round(cc_diff, 2), round(float(avg_y), 2), round(float(avg_t), 2)))
            #assert round(total_stake,1) <= round(self.Sigma,1), 'stake: {}, sigma: {}'.format(total_stake, self.Sigma)
            slot+=1
        self.end_time=time.time()
@@ -79,7 +86,8 @@ class DarkfiTable:
        stake_ratio = self.avg_stake_ratio()
        avg_apy = self.avg_apy()
        avg_apr = self.avg_apr()
        return self.secondary_pid.acc_percentage(), avg_apy, avg_reward, stake_ratio, avg_apr
        cc_diff_avg = sum([0 if math.fabs(i)<CC_DIFF_EPSILON else 1 for i in self.cc_diff])/len(self.cc_diff) if len(self.cc_diff)>0 else 0
        return self.secondary_pid.acc_percentage(), cc_diff_avg, avg_apy, avg_reward, stake_ratio, avg_apr

    """
    reward single lead, or slash lead with probability len(self.darkies)**-1
@@ -137,16 +145,19 @@ class DarkfiTable:
        for darkie in self.darkies:
            txs += [darkie.tx()]
        ret, actual_cc = DarkfiTable.auction(txs)
        self.computational_cost += [actual_cc]
        self.cc_diff += [MAX_BLOCK_CC - actual_cc]
        tips = ret[0]
        idxs = ret[1]
        self.tips_avg += [tips/len(idxs) if len(idxs)>0 else 0]
        basefee = self.basefee_pid.pid_clipped(self.computational_cost[-1], debug)
        assert basefee<=1
        self.base_fee+=[basefee]
        for idx in idxs:
            fee = txs[idx].cc()+basefee
            self.darkies[idx].pay_fee(fee)
            #print("charging darkie[{}]: {} DRK per tx of length: {}, burning: {}".format(idx, fee, len(txs[idx]), basefee))
        self.darkies[darkie_lead_idx].pay_fee(-1*tips)
        self.computational_cost += [actual_cc]

        # subtract the burned base fees from total stake
        self.Sigma -= basefee*len(txs)

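The quantity the autocrawler keeps as highest_cc_acc is the cc_diff_avg returned above: the fraction of slots where the realized block computational cost missed MAX_BLOCK_CC by at least CC_DIFF_EPSILON. A small worked example with hypothetical per-slot differences:

cc_diff = [0.0, 150.0, 0.00005, 2.0]   # hypothetical MAX_BLOCK_CC - actual_cc values
CC_DIFF_EPSILON = 0.0001
cc_diff_avg = sum(0 if abs(i) < CC_DIFF_EPSILON else 1 for i in cc_diff) / len(cc_diff)
assert cc_diff_avg == 0.5              # two of the four slots deviated by at least epsilon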
@@ -8,6 +8,10 @@ class RPID(BasePID):
    def __init__(self, controller_type, kp=0, ki=0, kd=0, dt=1, Kc=0, Ti=0, Td=0, Ts=0, debug=False):
        BasePID.__init__(self, PRIMARY_REWARD_TARGET, REWARD_MIN, REWARD_MAX, controller_type, kp=kp, ki=ki, kd=kd, dt=dt, Kc=Kc, Ti=Ti, Td=Td, Ts=Ts, debug=debug, type='reward', swap_error_fn=True)

class FeePID(BasePID):
    def __init__(self, kp=0, ki=0, kd=0, dt=1, Kc=0, Ti=0, Td=0, Ts=0, debug=False):
        BasePID.__init__(self, MAX_BLOCK_CC, FEE_MIN, FEE_MAX, CONTROLLER_TYPE_DISCRETE, kp=kp, ki=ki, kd=kd, dt=dt, Kc=Kc, Ti=Ti, Td=Td, Ts=Ts, debug=debug)


class PrimaryDiscretePID(RPID):
    def __init__(self, kp, ki, kd):
@@ -31,8 +35,3 @@ class SecondaryDiscretePID(LeadPID):
class SecondaryTakahashiPID(LeadPID):
    def __init__(self, kc, ti, td, ts):
        LeadPID.__init__(self, CONTROLLER_TYPE_TAKAHASHI, Kc=kc, Ti=ti, Td=td, Ts=ts)


class FeePID(BasePID):
    def __init__(self, kp=0, ki=0, kd=0, dt=1, Kc=0, Ti=0, Td=0, Ts=0, debug=False):
        BasePID.__init__(self, MAX_BLOCK_SIZE, FEE_MIN, FEE_MAX, CONTROLLER_TYPE_DISCRETE, kp=kp, ki=ki, kd=kd, dt=dt, Kc=Kc, Ti=Ti, Td=Td, Ts=Ts, debug=debug)
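A short instantiation sketch tying the pieces together (hypothetical driver code: the gains are example values, last_block_cc is a placeholder for the latest block's computational cost, and the clip bounds follow from the FEE_MIN/FEE_MAX constructor arguments above):

fee_pid = FeePID(kp=-0.0479, ki=-0.00055)            # gains of the kind the crawler searches for
basefee = fee_pid.pid_clipped(last_block_cc, False)  # same call DarkfiTable makes each slot
assert FEE_MIN <= basefee <= FEE_MAX                 # output expected to stay within the clip bounds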