top of page

Free indicators

Public · 5 members

AI RSI - Deep Learning LSTM

declare lower;

# ─────────────────────────────────────────────

# │ MarketFragments.com | DNA & Market │

# │ info@marketfragments.com │

# │ www.marketfragments.com │

# ─────────────────────────────────────────────

# Time →

# │

# █ █ █│ █

# █ █ █ │ █ █

# █ █ █ │ █ █ █ ╭─╮

# █ █ █ │ █ █ █ █ ╭─╯ ╰─╮

# █ █ █ │ █ █ █ █ █ █ ╭─╯ ╰─╮

# █ █ █ │ █ █ █ █ █ █ █ █ ╭─╯ ╰─╮

# █ █ █ │ █ █ █ █ █ █ █ █ █ █╭─╯ ╰─╮

# █ █ █ │ █ █ █ █ █ █ █ █ ╰─╮ ╭─╯

# █ █ █ │ █ █ █ █ █ █ ╰─╮ ╭─╯

# █ █ █ │ █ █ █ █ ╰─╮ ╭─╯

# █ █ █ │ █ █ ╰─╮ ╭─╯

# █ █ █ │ █ ╰─────╯

# ──────┴──────────────────────────────────────────────────────────────

# T1 T2 T3 T4 T5 T6

#


# RSI LSTM

# Sigmoid Activation Function

script sigmoid {
    input x = 1;
    # Logistic squashing function: maps any real x into the open
    # interval (0, 1). 1 / (1 + e^(-x))
    plot return = 1 / (1 + Exp(-x));
}


# Tanh Activation Function

script Tanh {
    input x = 1;
    # Hyperbolic tangent: maps any real x into (-1, 1).
    # Computed via e^(-2|x|) so the argument to Exp() is always <= 0,
    # avoiding the overflow the naive (Exp(x) - Exp(-x)) / (Exp(x) + Exp(-x))
    # form hits for large |x| (Exp(x) blows up, yielding NaN from inf/inf).
    def e2 = Exp(-2 * AbsValue(x));
    def magnitude = (1 - e2) / (1 + e2);
    plot return = if x >= 0 then magnitude else -magnitude;
}


# LSTM Cell

# Single LSTM cell step: combines the current input with the previous state
# and memory through input/forget/output gates, and emits the new state.
# NOTE(review): thinkScript has no arrays — weights[0], weights[1], ... are
# BAR OFFSETS of the same series. The caller passes weights/state_weights/bias
# that are constant across all bars, so every offset resolves to the SAME
# value and all gates effectively share one weight per series; confirm this
# is intended.
script LSTM_Cell {

input data = 1;            # Current input value (e.g. normalized RSI)

input prev_state = 0;      # Hidden state from the previous step

input prev_memory = 0;     # Cell memory from the previous step

input weights = close;     # Input weights (bar-offset "indexed", see note)

input state_weights = close; # Recurrent weights (bar-offset "indexed")

input bias = close;        # Bias terms (bar-offset "indexed")


# Gates: each is a sigmoid of a weighted sum of input, state, and memory.

def input_gate = sigmoid(data * weights[0] + prev_state * state_weights[0] + prev_memory * state_weights[1] + bias[0]);

def forget_gate = sigmoid(data * weights[1] + prev_state * state_weights[2] + prev_memory * state_weights[3] + bias[1]);

def output_gate = sigmoid(data * weights[2] + prev_state * state_weights[4] + prev_memory * state_weights[5] + bias[2]);


# Memory Update: forget part of the old memory, admit part of the new
# candidate (tanh-squashed) input modulation.

def input_modulation = Tanh(data * weights[3] + prev_state * state_weights[6] + bias[3]);

def memory = (forget_gate * prev_memory) + (input_gate * input_modulation);


# LSTM State: output gate scales the squashed memory.

def state = output_gate * Tanh(memory);


plot return = state;

}

# LSTM Train Function

# Sweeps LSTM_Cell across a window of historical bars, oldest bar first,
# feeding each step's output state into the next step, and returns the
# final state.
script TrainLSTM {
    input data = close;              # Series to train on
    input prev_state = 1;            # Hidden state seed for the oldest step
    input prev_memory = 1;           # Cell memory (held fixed — see note below)
    input weights = close;           # Input weights
    input state_weights = close;     # Recurrent/state weights
    input bias = close;              # Bias terms
    input training_data_Length = 100; # Number of historical bars to sweep

    # Fixes two defects in the original pair of folds:
    # (1) neither fold referenced its accumulator in the do-expression, so no
    #     sequential pass ever happened — the result collapsed to a single
    #     cell evaluation on one bar's value;
    # (2) the "if i < training_data_Length" guard was always true because a
    #     fold's upper bound is exclusive, so it was dead code.
    # Here the accumulator d carries the evolving state and GetValue walks
    # the window from the oldest bar (offset length-1) to the current bar.
    #
    # NOTE(review): a fold carries only ONE accumulator, so prev_memory is
    # not threaded between steps and stays at its initial value — confirm
    # that approximation is acceptable.
    def state = fold i = 0 to training_data_Length with d = prev_state do
        LSTM_Cell(
            data = GetValue(data, training_data_Length - 1 - i),
            prev_state = d,
            prev_memory = prev_memory,
            weights = weights,
            state_weights = state_weights,
            bias = bias);

    plot return = state;
}

# Gradient Calculation for Weights

# Accumulates a squared-error gradient term for each of the 8 weights and
# returns their sum (a single aggregate gradient, applied uniformly by the
# caller).
script CalculateGradients {
    input output_error = 1;  # dLoss/dOutput at the cell output
    input output = 1;        # Cell output, treated as a sigmoid activation
    input prev_state = 1;    # Previous hidden state
    input prev_memory = 1;   # Previous cell memory
    input input_data = 1;    # NOTE(review): accepted for caller compatibility
                             # but unused below — confirm whether the
                             # input-weight gradients should involve it.
    input weights = close;   # Weight series (bar-offset "indexed" by caller)

    # d(sigmoid)/dx expressed through the activation value itself.
    def sigmoid_derivative = output * (1 - output);

    # Sum only the per-weight terms inside the fold; the common factor
    # 2 * error * sigma' is loop-invariant, so it is hoisted out instead of
    # being re-multiplied on every iteration as in the original.
    def num_weights = 8; # Total weights
    def term_sum = fold i = 0 to num_weights with acc = 0 do
        acc + (if i == 0 then prev_state * weights[0] else
               if i == 1 then prev_memory * weights[1] else
               if i == 2 then prev_state * weights[2] else
               if i == 3 then prev_memory * weights[3] else
               if i == 4 then prev_state * weights[4] else
               if i == 5 then prev_memory * weights[5] else
               if i == 6 then prev_state else
               prev_memory); # NOTE(review): terms 6 and 7 carry no weight
                             # factor, unlike terms 0-5 — confirm intended.

    def sum_grad_weight = 2 * output_error * sigmoid_derivative * term_sum;

    plot return = sum_grad_weight;
}


# Gradient Calculation for Bias

# Aggregate squared-error gradient for the bias terms.
script CalculateBiasGradients {
    input output_error = 1; # dLoss/dOutput at the cell output
    input output = 1;       # Cell output, treated as a sigmoid activation

    # d(sigmoid)/dx expressed through the activation value itself.
    def sigmoid_derivative = output * (1 - output);

    # The original fold summed the SAME index-independent term num_biases
    # times; that is simply a multiplication (exact here — the factor is a
    # power of two), so the fold is removed.
    def num_biases = 4; # Total biases
    def sum_grad_bias = num_biases * 2 * output_error * sigmoid_derivative;

    plot return = sum_grad_bias;
}


# RSI Calculation
input rsiLength = 14; # Lookback for the RSI averages and for normalization

def netChange = close - close[1];

def gain = if netChange > 0 then netChange else 0;

def loss = if netChange < 0 then AbsValue(netChange) else 0;

# NOTE(review): simple-moving Average() smoothing, not Wilder's smoothing —
# values will differ slightly from the platform's built-in RSI; confirm
# this is intended.
def avgGain = Average(gain, rsiLength);

def avgLoss = Average(loss, rsiLength);

# When avgLoss is 0 (no down closes in the window), carry the previous RS
# forward recursively instead of dividing by zero.
def rs = if avgLoss != 0 then avgGain / avgLoss else rs[1];

def rsi = 100 - (100 / (1 + rs));


# Normalize RSI into [0, 1] over the lookback window. Guards the flat case
# (Highest == Lowest), where the original divided by zero and produced NaN.
def rsiLow = Lowest(rsi, rsiLength);
def rsiRange = Highest(rsi, rsiLength) - rsiLow;
def normalizedRSI = if rsiRange != 0 then (rsi - rsiLow) / rsiRange else 0;


# Initialize Weights and Biases
# Draw one Random() value on the first bar and carry it forward recursively,
# so each parameter is a single constant-valued series.
# NOTE(review): Random() is re-evaluated on every study recalculation, so
# these "weights" change between refreshes and the indicator repaints —
# confirm this is acceptable.
# NOTE(review): each parameter is one scalar series, yet LSTM_Cell reads
# weights[0], weights[1], ... — those are bar offsets of the same constant
# value, not distinct array elements, so all gates share one weight.
def bn = BarNumber();

def input_weights = if bn == 1 then Random() else input_weights[1];

def state_weights = if bn == 1 then Random() else state_weights[1];

def bias = if bn == 1 then Random() else bias[1];


# Dynamic Parameter Selection
# Preset (learning rate, training window) pairs; "Custom Input" falls back
# to the two inputs below.
input learning_rate_initial = 0.01;           # Used only by "Custom Input"

input training_data_Length_initial = 100;     # Used only by "Custom Input"


input Selection = {

"Custom Input",

"LR 0.01 && Train Length 10",

"LR 0.005 && Train Length 50",

"LR 0.001 && Train Length 100",

"LR 0.02 && Train Length 20",

default "LR 0.005 && Train Length 200"

};


# Dynamic Parameters
# Declared without initializers; the switch below assigns exactly one value
# per case (thinkScript requires every case to assign both).
def learning_rate;

def training_data_Length;


switch (Selection) {

case "Custom Input":

learning_rate = learning_rate_initial;

training_data_Length = training_data_Length_initial;

case "LR 0.01 && Train Length 10":

learning_rate = 0.01;

training_data_Length = 10;

case "LR 0.005 && Train Length 50":

learning_rate = 0.005;

training_data_Length = 50;

case "LR 0.001 && Train Length 100":

learning_rate = 0.001;

training_data_Length = 100;

case "LR 0.02 && Train Length 20":

learning_rate = 0.02;

training_data_Length = 20;

case "LR 0.005 && Train Length 200":

learning_rate = 0.005;

training_data_Length = 200;

}


# Display Selected Parameters
AddLabel(1, "Learning Rate: " + learning_rate, Color.WHITE);

AddLabel(1, "Training Data Length: " + training_data_Length, Color.YELLOW);


# Train the LSTM Model
# One pass over the training window; the previous bar's normalized RSI seeds
# both the hidden state and the cell memory.
def lstm_train_state = TrainLSTM(

data = normalizedRSI,

prev_state = normalizedRSI[1],

prev_memory = normalizedRSI[1],

weights = input_weights,

state_weights = state_weights,

bias = bias,

training_data_Length = training_data_Length);


# Predict Current State Using Initial Weights
# Forward step on the current bar, seeded with the trained state.
def lstm_state = LSTM_Cell(

data = normalizedRSI,

prev_state = lstm_train_state,

prev_memory = normalizedRSI[1],

weights = input_weights,

state_weights = state_weights,

bias = bias);


# Error Calculation
# Signed prediction error in normalized-RSI units ([0, 1] scale).
def error = lstm_state - normalizedRSI;


# Gradient-Based Updates
# Single gradient-descent step applied uniformly to the weight scalar.
def updated_weights = input_weights - learning_rate * CalculateGradients(

output_error = error,

output = lstm_state,

prev_state = lstm_train_state,

prev_memory = normalizedRSI[1],

input_data = normalizedRSI,

weights = input_weights);


def updated_bias = bias - learning_rate * CalculateBiasGradients(

output_error = error,

output = lstm_state);


# Recompute LSTM State with Updated Weights and Bias
def updated_lstm_state = LSTM_Cell(

data = normalizedRSI,

prev_state = lstm_train_state,

prev_memory = normalizedRSI[1],

weights = updated_weights,

# NOTE(review): the recurrent weights are replaced here by the updated INPUT
# weights, whereas the first pass used the separate state_weights series and
# no updated state_weights is ever computed — this looks unintended; confirm.
state_weights = updated_weights,

bias = updated_bias);


# Predicted RSI
# Map the [0, 1] model output back onto the RSI scale using the same
# Highest/Lowest window that produced normalizedRSI.
plot PredictedRSI = updated_lstm_state * (Highest(rsi, rsiLength) - Lowest(rsi, rsiLength)) + Lowest(rsi, rsiLength);

PredictedRSI.SetDefaultColor(Color.MAGENTA);


# Actual RSI
# Plotted alongside the prediction for visual comparison.
plot ActualRSI = rsi;

ActualRSI.SetDefaultColor(Color.CYAN);


# Error and Updated Parameters
# Error is on the normalized [0, 1] scale, so the > 10 red threshold is
# effectively unreachable — the label stays green. NOTE(review): confirm
# whether the threshold was meant for RSI-scale error.
AddLabel(1, "Prediction Error: " + Round(AbsValue(error), 2), if AbsValue(error) > 10 then Color.RED else Color.GREEN);

AddLabel(1, "Updated Weights: " + Round(updated_weights, 4), Color.YELLOW);

AddLabel(1, "Updated Bias: " + Round(updated_bias, 4), Color.CYAN);


# Overbought/Oversold Levels
plot OverSold = 30;

plot OverBought = 70;

# Arrows where the PREDICTED RSI crosses the threshold levels.
plot BuySignal = if PredictedRSI crosses above OverSold then OverSold else Double.NaN;

plot SellSignal = if PredictedRSI crosses below OverBought then OverBought else Double.NaN;


OverSold.SetDefaultColor(GetColor(8));

OverBought.SetDefaultColor(GetColor(8));

BuySignal.SetPaintingStrategy(PaintingStrategy.ARROW_UP);

BuySignal.SetDefaultColor(Color.UPTICK);

SellSignal.SetPaintingStrategy(PaintingStrategy.ARROW_DOWN);

SellSignal.SetDefaultColor(Color.DOWNTICK);

43 Views
Brain with financial data analysis.

Inquiries at :

tel#: (843) 321-8514

Important Risk Notice: Trading involves substantial risk of loss. This is educational content only — not advice. Full details are provided in the linked disclosure.

Proceed only if you're prepared.

bottom of page