[research/lotterysim] reorg lotterysim, acc_staked_ratio plot added
@@ -1,4 +0,0 @@
-all:
-	python main.py && python draw.py
-plot:
-	python elbow.py
@@ -16,17 +16,17 @@ with $k_1 = K_p + K_i + K_d$, $k_2 = -K_p - 2K_d$, $k_3 = K_d$, and $e$ is the error
 # simulation criterion
 
 find $K_p$, $K_i$, $K_d$ for highest accuracy by running the simulation on N trials, with a random number of nodes, starting from a random airdrop (summing to the total network stake), and running for a random running time.
 
 [images not shown]
 
 notice that the best parameters are spread out in the search space; picking the highest of them and running the simulation for 600 slots results in >36% accuracy
 
 [images not shown]
 
 # comparing range of target values between pallas field and simulation
 
 notice below that both $y$ and $T$ in the pallas field and in the simulation have the same range.
 
 [images not shown]
 
 # conclusion
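The context line above gives the gains of the velocity-form discrete PID controller being tuned. As a reference, here is a minimal sketch of that update rule (function name and signature are illustrative, not the simulator's API):

```python
# Velocity-form discrete PID step:
#   u[n] = u[n-1] + k1*e[n] + k2*e[n-1] + k3*e[n-2]
# with k1 = Kp + Ki + Kd, k2 = -Kp - 2*Kd, k3 = Kd, and e the error signal.
def pid_step(u_prev, e, e_prev, e_prev2, Kp, Ki, Kd):
    k1 = Kp + Ki + Kd
    k2 = -Kp - 2*Kd
    k3 = Kd
    return u_prev + k1*e + k2*e_prev + k3*e_prev2
```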
script/research/lotterysim/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
+from lottery import DarkfiTable

script/research/lotterysim/core/__init__.py (new file, empty)
@@ -1,10 +1,8 @@
-from utils import *
-from threading import Thread
-from strategy import *
+from core.utils import *
+from core.strategy import *
 
-class Darkie(Thread):
+class Darkie():
     def __init__(self, airdrop, initial_stake=None, vesting=[], hp=False, commit=True, epoch_len=100, strategy=None, apy_window=EPOCH_LENGTH):
-        Thread.__init__(self)
         self.vesting = [0] + vesting
         self.stake = (Num(airdrop) if hp else airdrop)
         self.initial_stake = [self.stake] # for debugging purpose
@@ -55,7 +53,7 @@ class Darkie(Thread):
         return staked_ratio
 
     def apy_percentage(self, rewards):
-        return self.apy(rewards)*100
+        return Num(self.apy(rewards)*100)
 
     def set_sigma_feedback(self, sigma, feedback, f, count, hp=True):
         self.Sigma = (Num(sigma) if hp else sigma)
@@ -1,8 +1,8 @@
 import matplotlib.pyplot as plt
 from tqdm import tqdm
-from darkie import *
 import time
 from datetime import timedelta
+from core.darkie import *
+from pid.cascade import *
 
 class DarkfiTable:
@@ -1,5 +1,5 @@
-from utils import *
 import math
+from core.utils import *
 
 class Strategy(object):
     def __init__(self, epoch_len=0):
@@ -1,7 +1,7 @@
 import random
 import math
 import numpy as np
-from constants import *
+from core.constants import *
 
 # naive factorial
 def fact(n, hp=False):
@@ -1,127 +0,0 @@ (file deleted)
from lottery import *

AVG_LEN = 3

KP_STEP=0.01
KP_SEARCH=0.5

KI_STEP=0.01
KI_SEARCH=0.05

KD_STEP=0.01
KD_SEARCH=-0.36

EPSILON=0.0001
RUNNING_TIME=100

#AIRDROP=1000
NODES=500

highest_acc = 0


KP='kp'
KI='ki'
KD='kd'

crawl = KP
crawl_str = input("crawl (kp/ki/kd):")

if crawl_str == KI:
    crawl=KI
elif crawl_str == KD:
    crawl=KD

high_precision_str = input("high precision arith (slooow) (y/n):")
high_precision = True if high_precision_str.lower()=="y" else False

randomize_nodes_str = input("randomize number of nodes (y/n):")
randomize_nodes = True if randomize_nodes_str.lower()=="y" else False

rand_running_time_str = input("random running time (y/n):")
rand_running_time = True if rand_running_time_str.lower()=="y" else False

debug_str = input("debug mode (y/n):")
debug = True if debug_str.lower()=="y" else False


def experiment(accs=[], controller_type=CONTROLLER_TYPE_DISCRETE, kp=0, ki=0, kd=0, distribution=[], hp=False):
    dt = DarkfiTable(sum(distribution), RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=kd)
    RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
    for idx in range(0,RND_NODES):
        darkie = Darkie(distribution[idx])
        dt.add_darkie(darkie)
    acc = dt.background(rand_running_time, hp)
    print('acc: {}'.format(acc))
    accs+=[acc]
    return acc


def multi_trial_exp(gains, kp, ki, kd, distribution = [], hp=False):
    global highest_acc
    accs = []
    for i in range(0, AVG_LEN):
        acc = experiment(accs, CONTROLLER_TYPE_DISCRETE, kp=kp, ki=ki, kd=kd, distribution=distribution, hp=hp)
        accs += [acc]

    avg_acc = sum(accs)/float(AVG_LEN)
    buff = 'accuracy:{}, kp: {}, ki:{}, kd:{}'.format(avg_acc, kp, ki, kd)
    print(buff)
    if avg_acc > 0:
        gain = (avg_acc, (kp, ki, kd))
        gains += [gain]
    if avg_acc > highest_acc:
        highest_acc = avg_acc
        with open("highest_gain.txt", 'w') as f:
            f.write(buff)

def single_trial_exp(gains, kp, ki, kd, distribution=[], hp=False):
    global highest_acc
    acc = experiment(kp=kp, ki=ki, kd=kd, distribution=distribution, hp=hp)
    buff = 'accuracy:{}, kp: {}, ki:{}, kd:{}'.format(acc, kp, ki, kd)
    print(buff)
    if acc > 0:
        gain = (acc, (kp, ki, kd))
        gains += [gain]
    if acc > highest_acc:
        highest_acc = acc
        with open("highest_gain.txt", 'w') as f:
            f.write(buff)
        gains += [gain]


gains = []
if __name__ == "__main__":
    crawl_range = None
    start = None
    if crawl==KP:
        start = KP_SEARCH
        step = KP_STEP
    elif crawl==KI:
        start = KI_SEARCH
        step = KI_STEP
    elif crawl==KD:
        start = KD_SEARCH
        step = KD_STEP
    step = 0.01
    rhs = np.arange(start, start*3, step) if start>=0 else np.arange(start*3, start, step)
    lhs = np.flip(np.arange(-3*start, start, step)) if start<0 else np.flip(np.arange(start, -3*start, step))
    crawl_range=tqdm(np.concatenate((rhs, lhs)))
    distribution = [random.random() for i in range(NODES)]
    for i in crawl_range:
        crawl_range.set_description("crawling {} at {}".format(crawl, i))
        kp = i if crawl==KP else KP_SEARCH
        ki = i if crawl==KI else KI_SEARCH
        kd = i if crawl==KD else KD_SEARCH
        multi_trial_exp(gains, kp, ki, kd, distribution, hp=high_precision)

    gains=sorted(gains, key=lambda i: i[0], reverse=True)
    with open("gains.txt", "w") as f:
        buff=''
        for gain in gains:
            line=str(gain[0])+',' +','.join([str(i) for i in gain[1]])+'\n'
            buff+=line
        f.write(buff)
@@ -1,16 +1,15 @@
-from lottery import *
 import os
 import numpy
-from strategy import LinearStrategy
+from core.strategy import *
+from core.lottery import *
 
-os.system("rm f.hist; rm leads.hist")
+os.system("rm log/*_feedback.hist; rm log/*_output.hist")
 
 RUNNING_TIME = int(input("running time:"))
 
 NODES=100
 
 if __name__ == "__main__":
-    darkies = []
     egalitarian = ERC20DRK/NODES
     darkies = []
     for id in range(int(NODES)):
@@ -2,12 +2,11 @@ import matplotlib.pyplot as plt
 import numpy as np
 import os
 
-LEAD_FILE = "leads.hist"
-F_FILE = "f.hist"
+LEAD_FILE = 'log'+os.sep+"f_feedback.hist"
+F_FILE = 'log'+os.sep+"f_output.hist"
 
-LEAD_PROCESSED_IMG = "lead_history_processed.png"
-F_PROCESSED_IMG = "f_history_processed.png"
+LEAD_PROCESSED_IMG = 'img'+os.sep+"feedback_history_processed.png"
+F_PROCESSED_IMG = 'img'+os.sep+"output_history_processed.png"
 
 SEP = ","
 NODES = 1000 # number of nodes logged
@@ -17,7 +16,7 @@ with open(LEAD_FILE) as f:
     nodes = buf.split(SEP)[:-1]
    node_log = []
    for i in range(0, len(nodes)):
-        node_log+=[int(nodes[i])]
+        node_log+=[int(float(nodes[i]))]
    freq_single_lead = sum(np.array(node_log)==1)/float(len(node_log))
    print("single leader frequency: {}".format(freq_single_lead))
    plt.plot(node_log)
BIN script/research/lotterysim/img/feedback_history_processed.png (new file; binary, 18 KiB, not shown)
BIN script/research/lotterysim/img/output_history_processed.png (new file; binary, 19 KiB, not shown)
@@ -1 +1 @@
-0,0,0.0,1.0,10.0,12.0,10.0,7.0,11.0,9.0,13.0,8.0,9.0,7.0,5.0,8.0,11.0,12.0,11.0,6.0,13.0,8.0,8.0,1.0,2.0,21.0,1.0,0.0,21.0,1.0,1.0,21.0,2.0,1.0,100.0,1.0,0.0,100.0,3.0,2.0,100.0,1.0,2.0,100.0,2.0,1.0,100.0,2.0,2.0,100.0,1.0,2.0,100.0,6.0,6.0,100.0,5.0,4.0,100.0,4.0,3.0,100.0,8.0,7.0,100.0,7.0,8.0,100.0,10.0,14.0,100.0,7.0,10.0,100.0,6.0,5.0,100.0,7.0,9.0,100.0,8.0,8.0,100.0,6.0,5.0,100.0,2.0,8.0,100.0,9.0,4.0,100.0,6.0,6.0,100.0,3.0,3.0,100.0,5.0,9.0,100.0,9.0,
+0,0,0.0,1.0,11.0,9.0,11.0,5.0,10.0,100.0,6.0,17.0,
@@ -1 +1 @@
-0,0.7290000000000001,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999,1,0.9999,0.9999
+0,0.7290000000000001,0.9999,0.9999,0.9999,0.9999,0.9999,1,0.9999,0.9999,1,
@@ -1 +1 @@
-avg(acc): 0.33812375249501, avg(apy): 1.6658332654209573, avg(reward): 710.959757565603, avg(stake ratio): 60.78435509175097, kp: -0.42000000000000043, ki:2.7100000000000035, kd:-0.23999999999999488
+avg(acc): 0.11457085828343312, avg(apy): 0.25534075980633647, avg(reward): 59.39393939393939, avg(stake ratio): 16.24329884629891, kp: 0.06000000000000011, ki:-0.19999999999999796, kd:0.0499999999999996
@@ -1,7 +1,6 @@
-from utils import *
+from core.utils import *
 from pid.pid_base import BasePID
 
 '''
 reward primary PID controller.
 '''
@@ -1,4 +1,5 @@
-from utils import *
+from core.utils import *
+import os
 
 '''
 base discrete/takahashi PID controller
@@ -117,8 +118,8 @@ class BasePID:
         f.write(buf)
 
     def write(self, feedback_hist_file='_feedback.hist', output_hist_file='_output.hist'):
-        self.write_feedback(self.type+feedback_hist_file)
-        self.write_fval(self.type+output_hist_file)
+        self.write_feedback('log' + os.sep + self.type+feedback_hist_file)
+        self.write_fval('log'+ os.sep + self.type+output_hist_file)
 
     def acc(self):
         return sum(np.array(self.feedback_hist)==1)/float(len(self.feedback_hist))
@@ -1,5 +1,10 @@
-from lottery import *
 from argparse import ArgumentParser
+from core.lottery import DarkfiTable
+from core.utils import *
+from core.darkie import Darkie
+from tqdm import tqdm
+from core.strategy import SigmoidStrategy
+import os
 
 AVG_LEN = 5
 
@@ -41,7 +46,7 @@ randomize_nodes = args.randomize_nodes
 rand_running_time = args.rand_running_time
 debug = args.debug
 
-def experiment(apys=[], controller_type=CONTROLLER_TYPE_DISCRETE, rkp=0, rki=0, rkd=0, distribution=[], hp=True):
+def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, rkp=0, rki=0, rkd=0, distribution=[], hp=True):
     dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=-0.010399999999938556, ki=-0.0365999996461878, kd=0.03840000000000491, r_kp=rkp, r_ki=rki, r_kd=rkd)
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
     for idx in range(0,RND_NODES):
@@ -61,7 +66,7 @@ def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
     rewards = []
     stakes_ratios = []
     for i in range(0, AVG_LEN):
-        acc, apy, reward, stake_ratio = experiment(apys, CONTROLLER_TYPE_DISCRETE, rkp=kp, rki=ki, rkd=kd, distribution=distribution, hp=hp)
+        acc, apy, reward, stake_ratio = experiment(CONTROLLER_TYPE_DISCRETE, rkp=kp, rki=ki, rkd=kd, distribution=distribution, hp=hp)
         accs += [acc]
         apys += [apy]
         rewards += [reward]
@@ -81,7 +86,7 @@ def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
             highest_acc = avg_acc
             highest_staked = avg_staked
             highest_gain = (kp, ki, kd)
-            with open("highest_gain.txt", 'w') as f:
+            with open('log'+os.sep+"highest_gain.txt", 'w') as f:
                 f.write(buff)
     return buff, new_record
script/research/lotterysim/reports/acc_vs_staked_ratio.py (new file, 39 lines)
@@ -0,0 +1,39 @@
from lottery import *
import os
import numpy
from matplotlib import pyplot as plt

os.system("rm f.hist; rm leads.hist")

RUNNING_TIME = int(input("running time:"))
ERC20DRK=2.1*10**9
NODES=1000
plot = []
EXPS=10
for portion in range(1,11):
    accs = []
    for _ in range(EXPS):
        darkies = []
        egalitarian = ERC20DRK/NODES
        darkies += [ Darkie(random.gauss(egalitarian, egalitarian*0.1), commit=False) for id in range(int(NODES/portion)) ]
        #darkies += [Darkie() for _ in range(NODES*2)]
        airdrop = ERC20DRK
        effective_airdrop = 0
        for darkie in darkies:
            effective_airdrop+=darkie.stake
        stake_portion = effective_airdrop/airdrop*100
        print("network airdrop: {}, staked token: {}/{}% on {} nodes".format(airdrop, effective_airdrop, stake_portion, len(darkies)))
        dt = DarkfiTable(airdrop, RUNNING_TIME, CONTROLLER_TYPE_DISCRETE, kp=0.005999999999989028, ki=-0.005999999985257798, kd=0.01299999999999478)
        for darkie in darkies:
            dt.add_darkie(darkie)
        acc = dt.background(rand_running_time=False)
        accs += [acc]
    avg_acc = sum(accs)/EXPS*100
    plot+=[(stake_portion, avg_acc)]

plt.plot([x[0] for x in plot], [x[1] for x in plot])
plt.xlabel('drk staked %')
plt.ylabel('accuracy %')
plt.savefig('stake.png')
plt.show()
script/research/lotterysim/reports/staking.md (new file, 87 lines)
@@ -0,0 +1,87 @@
---
title: tokens in stake
author: ertosns
date: 24/3/2023
---

# Staking in darkfi blockchain

The leadership winning mechanism is based on Ouroboros Crypsinous
with some modifications. A stakeholder wins if some random value $y$,
specific to the stakeholder and derived from the blockchain, is less
than a target value $T$. The probability of winning is quasi-linear
in the relative stake.

## Least amount of DRK token required for staking

Accuracy of single leader per slot is affected by the percentage of
total DRK tokens in stake; in fact the relation is logarithmic.

Assume a community $C$ owns 100% of DRK tokens, i.e. relative stake $\alpha = 1$.

The probability of $C$ winning the lottery at any slot is defined as:

\begin{align*}
P(C=lead) &= P(y < 1 - (1-f)^{\alpha}) \\
          &= P(y < 1 - (1-f)) \\
          &= P(y < f)
\end{align*}

In our case $f$ targets a single leader per slot. An emulation of
the leader election mechanism with PID controllers shows that $f$
oscillates around ~0.65 (depending on the ratio of tokens in stake).

Then,

\begin{align*}
P(C=lead) \approx 0.35
\end{align*}
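To sanity-check the ~0.35 figure, here is a rough Monte Carlo sketch (my assumptions: $y$ uniform in $[0,1)$ and independent across stakeholders) estimating how often exactly one leader appears per slot:

```python
import random

# Estimate the per-slot frequency of exactly one leader: stakeholder i with
# relative stake alpha_i wins iff y_i < 1 - (1-f)**alpha_i, y_i uniform.
def single_lead_freq(f, alphas, trials=100_000):
    hits = 0
    for _ in range(trials):
        leads = sum(1 for a in alphas if random.random() < 1 - (1 - f)**a)
        hits += (leads == 1)
    return hits / trials

# 100 equal stakeholders at the observed f ~ 0.65 lands around 0.35-0.37,
# in the vicinity of the figure quoted above.
print(single_lead_freq(0.65, [1/100]*100))
```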
## Linear independence

Given the linear independence property of the target function $T$, the
probability of a single node winning leadership at any slot with $S$ staked
tokens is the same as the probability of $N$ nodes winning leadership at the
same slot with the same total stake $S$, for any values of $S$ and $N$.

### Example

If the probability of a stakeholder owning 0.1% of the tokens winning is
0.03, then the probability of a pool of stakeholders jointly owning 0.1%
of the tokens winning is also 0.03. A numeric check follows below.
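The claim follows from exponents multiplying through the pool. A minimal numeric check, with illustrative values and assuming the win probability $1-(1-f)^{\alpha}$ from the derivation above:

```python
# One node with relative stake S wins as often as a pool of N nodes
# holding S/N each: 1 - ((1-f)**(S/N))**N == 1 - (1-f)**S.
f, S, N = 0.65, 0.001, 10
p_single = 1 - (1 - f)**S
p_pool = 1 - ((1 - f)**(S / N))**N
print(p_single, p_pool)  # identical up to float rounding
```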
# Tokens in stake

The probability of a pool with $N$% of the stake winning the leadership at
any slot is:

\begin{align*}
\frac{N}{100} \cdot P(C=lead)
\end{align*}

[figure: accuracy vs. percentage of DRK staked; image not shown]

## Example

Assume $P(C=lead)=33\%$; then if only 10% of the total network tokens
are staked, the probability of having a single leader per slot is 0.03,
an accuracy of 3%.
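The example reduces to one multiplication; as a quick check:

```python
p_lead = 0.33           # P(C=lead) with all tokens staked
staked = 0.10           # 10% of the supply in stake
print(p_lead * staked)  # 0.033, i.e. ~3% accuracy
```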
## Ratio of staked tokens in different networks

| network   | staked ratio |
|-----------|--------------|
| Ethereum  | 16%          |
| Cardano   | 69%          |
| Solana    | 71%          |
| BNB Chain | 16%          |
| Polygon   | 40%          |
| Polkadot  | 47%          |

# Stake privacy leakage

From the graph above, and as a consequence of the linear independence
property, the accuracy of the controller leaks the percentage of tokens
in stake.

## Fix stake privacy leakage

An instant-finality mechanism such as khonsu would prevent such leakage.
@@ -1,10 +1,15 @@
-from lottery import *
-from threading import Thread
 from argparse import ArgumentParser
+from core.lottery import DarkfiTable
+from core.utils import *
+from core.darkie import Darkie
+from tqdm import tqdm
+from core.strategy import SigmoidStrategy
+import os
 
-avg_len = 5
-
-kp_STEP=0.01
+AVG_LEN = 5
+
+KP_STEP=0.01
 KP_SEARCH= -0.04019999999996926
 
 KI_STEP=0.01
@@ -40,13 +45,13 @@ randomize_nodes = args.randomize_nodes
 rand_running_time = args.rand_running_time
 debug = args.debug
 
-def experiment(accs=[], controller_type=CONTROLLER_TYPE_DISCRETE, kp=0, ki=0, kd=0, distribution=[], hp=True):
+def experiment(controller_type=CONTROLLER_TYPE_DISCRETE, kp=0, ki=0, kd=0, distribution=[], hp=True):
     dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=kd)
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
     for idx in range(0,RND_NODES):
-        darkie = Darkie(distribution[idx])
+        darkie = Darkie(distribution[idx], strategy=SigmoidStrategy(EPOCH_LENGTH), apy_window=EPOCH_LENGTH)
         dt.add_darkie(darkie)
-    acc = dt.background(rand_running_time, hp)
+    acc, apy, reward, stake_ratio = dt.background_with_apy(rand_running_time, hp)
     return acc
 
 def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
@@ -56,7 +61,7 @@ def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
     exp_threads = []
     accs = []
     for i in range(0, AVG_LEN):
-        acc = experiment(accs, CONTROLLER_TYPE_DISCRETE, kp=kp, ki=ki, kd=kd, distribution=distribution, hp=hp)
+        acc = experiment(CONTROLLER_TYPE_DISCRETE, kp=kp, ki=ki, kd=kd, distribution=distribution, hp=hp)
         accs += [acc]
     avg_acc = sum(accs)/float(AVG_LEN)
     buff = 'accuracy:{}, kp: {}, ki:{}, kd:{}'.format(avg_acc, kp, ki, kd)
@@ -67,7 +72,7 @@ def multi_trial_exp(kp, ki, kd, distribution = [], hp=True):
             new_record = True
             highest_acc = avg_acc
             highest_gain = (kp, ki, kd)
-            with open("highest_gain.txt", 'w') as f:
+            with open('log'+os.sep+"highest_gain.txt", 'w') as f:
                 f.write(buff)
     return buff, new_record
@@ -1,5 +1,10 @@
-from lottery import *
 from argparse import ArgumentParser
+from core.lottery import DarkfiTable
+from core.utils import *
+from core.darkie import Darkie
+from tqdm import tqdm
+from core.strategy import SigmoidStrategy
+import os
 
 AVG_LEN = 5
 
@@ -45,13 +50,13 @@ rand_running_time = args.rand_running_time
 debug = args.debug
 
 
-def experiment(accs=[], controller_type=CONTROLLER_TYPE_TAKAHASHI, kp=0, ki=0, kd=0, kc=0, ti=0, td=0, ts=0, distribution=[], hp=False):
+def experiment(controller_type=CONTROLLER_TYPE_TAKAHASHI, kp=0, ki=0, kd=0, kc=0, ti=0, td=0, ts=0, distribution=[], hp=False):
     dt = DarkfiTable(ERC20DRK, RUNNING_TIME, controller_type, kp=kp, ki=ki, kd=kd, kc=kc, td=td, ti=ti, ts=ts)
     RND_NODES = random.randint(5, NODES) if randomize_nodes else NODES
     for idx in range(0,RND_NODES):
-        darkie = Darkie(distribution[idx])
+        darkie = Darkie(distribution[idx], strategy=SigmoidStrategy(EPOCH_LENGTH), apy_window=EPOCH_LENGTH)
         dt.add_darkie(darkie)
-    acc = dt.background(rand_running_time, hp)
+    acc, apy, reward, stake_ratio = dt.background_with_apy(rand_running_time, hp)
     return acc
@@ -61,7 +66,7 @@ def multi_trial_exp(kc, td, ti, ts, distribution = [], hp=False):
     new_record = False
     accs = []
     for i in range(0, AVG_LEN):
-        acc = experiment(accs, CONTROLLER_TYPE_DISCRETE, kc=kc, ti=ti, td=td, ts=ts, distribution=distribution, hp=hp)
+        acc = experiment(CONTROLLER_TYPE_DISCRETE, kc=kc, ti=ti, td=td, ts=ts, distribution=distribution, hp=hp)
         accs += [acc]
     avg_acc = sum(accs)/float(AVG_LEN)
     buff = 'accuracy:{}, kc: {}, td:{}, ti:{}, ts:{}'.format(avg_acc, kc, td, ti, ts)
@@ -72,7 +77,7 @@ def multi_trial_exp(kc, td, ti, ts, distribution = [], hp=False):
             new_record = True
             highest_acc = avg_acc
             highest_gain = gain
-            with open("highest_gain_takahashi.txt", 'w') as f:
+            with open('log'+os.sep+"highest_gain_takahashi.txt", 'w') as f:
                 f.write(buff)
     return buff, new_record
@@ -1,16 +1,18 @@
-from lottery import *
+from core.lottery import *
+from core.utils import *
+from core.strategy import *
 
 NODES=1000
 
 RUNNING_TIME = int(input("running time:"))
 if __name__ == "__main__":
-    darkies = [Darkie(i) for i in range(NODES)]
+    darkies = [Darkie(i, strategy=random_strategy(EPOCH_LENGTH), apy_window=EPOCH_LENGTH) for i in range(NODES)]
     airdrop = 0
     for darkie in darkies:
         airdrop+=darkie.stake
-    dt = DarkfiTable(airdrop, RUNNING_TIME, controller_type=CONTROLLER_TYPE_TAKAHASHI, kc=-2.19, ti=-0.5, td=0.25, ts=-0.35)
+    dt = DarkfiTable(airdrop, RUNNING_TIME, controller_type=CONTROLLER_TYPE_TAKAHASHI, kc=-2.19, ti=-0.5, td=0.25, ts=-0.35, r_kp=-0.42, r_ki=2.71, r_kd=-0.239)
     for darkie in darkies:
         dt.add_darkie(darkie)
-    acc = dt.background(True, False)
+    acc, apy, reward, stake_ratio = dt.background_with_apy()
     print('acc: {}'.format(acc))
     dt.write()