gru_layer

What is a GRU layer?

The GRU (Gated Recurrent Unit) layer is a type of recurrent neural network (RNN) layer used to process long sequences.

The GRU is based on essentially the same idea as the LSTM (Long Short-Term Memory). Like the LSTM, it is an RNN-family layer that processes sequence data. Compared to the LSTM, however, the GRU simplifies the gating mechanism that protects its memory and decides how much information to carry over from the previous state, so it can maintain long-range state with less computation.

The GRU has a simpler structure than the LSTM and needs fewer parameters. It tends to train faster than the LSTM and to generalize better on small datasets.

A GRU layer regulates its memory with two gates. The first, the "update gate", decides how much of the previous state is kept and how much is replaced by a newly computed candidate state. The second, the "reset gate", decides how much of the previous state to discard when that candidate state is computed. Using these gates, the layer combines the input sequence with the previous state and emits a new state at every time step.

GRU λ ˆμ΄μ–΄λŠ” 주둜 μ‹œν€€μŠ€ 데이터λ₯Ό λ‹€λ£¨λŠ” μžμ—°μ–΄ 처리(NLP) λΆ„μ•Όμ—μ„œ μ‚¬μš©λ©λ‹ˆλ‹€. GRU λ ˆμ΄μ–΄λ₯Ό μ μš©ν•œ λͺ¨λΈμ€ ν…μŠ€νŠΈ 생성, λ²ˆμ—­, 감성 뢄석 λ“± λ‹€μ–‘ν•œ νƒœμŠ€ν¬μ—μ„œ 쒋은 μ„±λŠ₯을 λ³΄μž…λ‹ˆλ‹€.


increment_layer

static void increment_layer(layer *l, int steps)
{
    int num = l->outputs*l->batch*steps;
    l->output += num;
    l->delta += num;
    l->x += num;
    l->x_norm += num;

#ifdef GPU
    l->output_gpu += num;
    l->delta_gpu += num;
    l->x_gpu += num;
    l->x_norm_gpu += num;
#endif
}

Function name: increment_layer

Inputs:

  β€’ layer *l: the layer whose buffer pointers are advanced

  β€’ int steps: the number of time steps to advance by

Behavior:

  β€’ Advances the output, delta, x, and x_norm pointers of the layer struct pointed to by l by steps time steps (outputs*batch*steps elements).

  β€’ When compiled with GPU support, the output_gpu, delta_gpu, x_gpu, and x_norm_gpu pointers are advanced by the same amount.

Description:

  β€’ This function moves the layer's per-time-step buffer pointers forward by steps time steps.

  β€’ After the pointers are moved, subsequent reads and writes refer to the slice for the next time step instead of the previous one (see the layout sketch after this list).

  β€’ When compiled with GPU support, the pointers into GPU memory are moved as well.

forward_gru_layer

void forward_gru_layer(layer l, network net)
{
    network s = net;
    s.train = net.train;
    int i;
    layer uz = *(l.uz);
    layer ur = *(l.ur);
    layer uh = *(l.uh);

    layer wz = *(l.wz);
    layer wr = *(l.wr);
    layer wh = *(l.wh);

    fill_cpu(l.outputs * l.batch * l.steps, 0, uz.delta, 1);
    fill_cpu(l.outputs * l.batch * l.steps, 0, ur.delta, 1);
    fill_cpu(l.outputs * l.batch * l.steps, 0, uh.delta, 1);

    fill_cpu(l.outputs * l.batch * l.steps, 0, wz.delta, 1);
    fill_cpu(l.outputs * l.batch * l.steps, 0, wr.delta, 1);
    fill_cpu(l.outputs * l.batch * l.steps, 0, wh.delta, 1);
    if(net.train) {
        fill_cpu(l.outputs * l.batch * l.steps, 0, l.delta, 1);
        copy_cpu(l.outputs*l.batch, l.state, 1, l.prev_state, 1);
    }

    for (i = 0; i < l.steps; ++i) {
        s.input = l.state;
        forward_connected_layer(wz, s);
        forward_connected_layer(wr, s);

        s.input = net.input;
        forward_connected_layer(uz, s);
        forward_connected_layer(ur, s);
        forward_connected_layer(uh, s);


        copy_cpu(l.outputs*l.batch, uz.output, 1, l.z_cpu, 1);
        axpy_cpu(l.outputs*l.batch, 1, wz.output, 1, l.z_cpu, 1);

        copy_cpu(l.outputs*l.batch, ur.output, 1, l.r_cpu, 1);
        axpy_cpu(l.outputs*l.batch, 1, wr.output, 1, l.r_cpu, 1);

        activate_array(l.z_cpu, l.outputs*l.batch, LOGISTIC);
        activate_array(l.r_cpu, l.outputs*l.batch, LOGISTIC);

        copy_cpu(l.outputs*l.batch, l.state, 1, l.forgot_state, 1);
        mul_cpu(l.outputs*l.batch, l.r_cpu, 1, l.forgot_state, 1);

        s.input = l.forgot_state;
        forward_connected_layer(wh, s);

        copy_cpu(l.outputs*l.batch, uh.output, 1, l.h_cpu, 1);
        axpy_cpu(l.outputs*l.batch, 1, wh.output, 1, l.h_cpu, 1);

        if(l.tanh){
            activate_array(l.h_cpu, l.outputs*l.batch, TANH);
        } else {
            activate_array(l.h_cpu, l.outputs*l.batch, LOGISTIC);
        }

        weighted_sum_cpu(l.state, l.h_cpu, l.z_cpu, l.outputs*l.batch, l.output);

        copy_cpu(l.outputs*l.batch, l.output, 1, l.state, 1);

        net.input += l.inputs*l.batch;
        l.output += l.outputs*l.batch;
        increment_layer(&uz, 1);
        increment_layer(&ur, 1);
        increment_layer(&uh, 1);

        increment_layer(&wz, 1);
        increment_layer(&wr, 1);
        increment_layer(&wh, 1);
    }
}

Function name: forward_gru_layer

Inputs:

  β€’ layer l: the layer struct holding the GRU layer's state and parameters

  β€’ network net: the network struct holding the network's state and parameters

Behavior:

  β€’ Performs forward propagation through the GRU layer: the input data is processed one time step at a time, the layer's output is computed, and that output is handed to the next layer as its input.

  β€’ Along the way, the intermediate values needed for backpropagation are stored.

Description:

  β€’ Among the GRU layer's sub-layers, uz, ur, and uh are the connected layers that process the current input (net.input), while wz, wr, and wh process the previous state (l.state; wh is applied to the reset-gated state l.forgot_state).

  β€’ The GRU layer is a kind of RNN for sequence data: the state from the previous time step is reused at the current one.

  β€’ Each forward_connected_layer call computes weights times input plus bias; the sub-layers are created with LINEAR activation, so the gate nonlinearities are applied afterwards with activate_array.

  β€’ The z and r values are computed by summing the outputs of the uz/ur sub-layers (input path) and the wz/wr sub-layers (state path).

  β€’ z is obtained by combining the previous state with the current input and applying the logistic function.

  β€’ r is obtained the same way: previous state and current input are combined, then the logistic function is applied.

  β€’ The candidate state h is computed from the current input (through uh) and the reset-gated previous state r βŠ™ state (through wh, which is fed l.forgot_state).

  β€’ tanh (or logistic, depending on l.tanh) is applied to h, and the output is the weighted sum z βŠ™ state + (1 βˆ’ z) βŠ™ h, which is also copied back into l.state for the next time step.

  β€’ Because the GRU layer spans several time steps, the loop repeats this steps times, calling forward_connected_layer for each sub-layer and advancing the buffer pointers with increment_layer; see the stripped-down sketch below.

backward_gru_layer

void backward_gru_layer(layer l, network net)
{
}

Function name: backward_gru_layer

Inputs:

  β€’ layer l: the GRU layer struct

  β€’ network net: the network struct

Behavior:

  β€’ Intended to compute the backward pass (backpropagation) of the GRU (gated recurrent unit) layer and pass the error signal on to the previous layer.

  β€’ To do that it would have to compute the gradients with respect to the input signal and the weights.

Description:

  β€’ l: the GRU layer struct, holding the inputs, weights, outputs, and related buffers.

  β€’ net: the network struct, used to hand the error signal back to the previous layer during backpropagation.

In this file the function body is empty: the CPU backward pass for the GRU layer is not implemented, so calling backward_gru_layer performs no computation.
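
For reference, the gradients that a backward pass would have to produce for the final mixing step follow directly from the forward equations given earlier; this is a sketch derived from those equations, not code from the source. With h_t = z_t βŠ™ h_{t-1} + (1 βˆ’ z_t) βŠ™ hΜƒ_t and incoming gradient Ξ΄_t = βˆ‚L/βˆ‚h_t:

$$\frac{\partial L}{\partial \tilde{h}_t} = \delta_t \odot (1 - z_t), \qquad \frac{\partial L}{\partial z_t} = \delta_t \odot (h_{t-1} - \tilde{h}_t), \qquad \frac{\partial L}{\partial h_{t-1}} \mathrel{+}= \delta_t \odot z_t$$

with further contributions to βˆ‚L/βˆ‚h_{t-1} flowing through the two gates and the reset-gated candidate path, and the weight gradients obtained by backpropagating through the six connected sub-layers.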

update_gru_layer

void update_gru_layer(layer l, update_args a)
{
    update_connected_layer(*(l.ur), a);
    update_connected_layer(*(l.uz), a);
    update_connected_layer(*(l.uh), a);
    update_connected_layer(*(l.wr), a);
    update_connected_layer(*(l.wz), a);
    update_connected_layer(*(l.wh), a);
}

Function name: update_gru_layer

Inputs:

  β€’ layer l: the GRU layer struct

  β€’ update_args a: the struct of update arguments (learning rate, momentum, decay, and so on)

Behavior:

  β€’ Updates the weights and biases of each of the GRU layer's connected sub-layers (ur, uz, uh, wr, wz, wh).

Description:

  β€’ Given the GRU layer struct l, the weights and biases of its connected sub-layers (ur, uz, uh, wr, wz, wh) are updated.

  β€’ This is done by calling update_connected_layer() once for each sub-layer, as sketched below.

make_gru_layer

layer make_gru_layer(int batch, int inputs, int outputs, int steps, int batch_normalize, int adam)
{
    fprintf(stderr, "GRU Layer: %d inputs, %d outputs\n", inputs, outputs);
    batch = batch / steps;
    layer l = {0};
    l.batch = batch;
    l.type = GRU;
    l.steps = steps;
    l.inputs = inputs;

    l.uz = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.uz) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
    l.uz->batch = batch;

    l.wz = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.wz) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
    l.wz->batch = batch;

    l.ur = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.ur) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
    l.ur->batch = batch;

    l.wr = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.wr) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
    l.wr->batch = batch;

    l.uh = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.uh) = make_connected_layer(batch*steps, inputs, outputs, LINEAR, batch_normalize, adam);
    l.uh->batch = batch;

    l.wh = malloc(sizeof(layer));
    fprintf(stderr, "\t\t");
    *(l.wh) = make_connected_layer(batch*steps, outputs, outputs, LINEAR, batch_normalize, adam);
    l.wh->batch = batch;

    l.batch_normalize = batch_normalize;


    l.outputs = outputs;
    l.output = calloc(outputs*batch*steps, sizeof(float));
    l.delta = calloc(outputs*batch*steps, sizeof(float));
    l.state = calloc(outputs*batch, sizeof(float));
    l.prev_state = calloc(outputs*batch, sizeof(float));
    l.forgot_state = calloc(outputs*batch, sizeof(float));
    l.forgot_delta = calloc(outputs*batch, sizeof(float));

    l.r_cpu = calloc(outputs*batch, sizeof(float));
    l.z_cpu = calloc(outputs*batch, sizeof(float));
    l.h_cpu = calloc(outputs*batch, sizeof(float));

    l.forward = forward_gru_layer;
    l.backward = backward_gru_layer;
    l.update = update_gru_layer;

    return l;
}

Function name: make_gru_layer

Inputs:

  β€’ int batch: batch size

  β€’ int inputs: input size

  β€’ int outputs: output size

  β€’ int steps: number of time steps

  β€’ int batch_normalize: whether to use batch normalization

  β€’ int adam: whether to use the Adam optimizer

Behavior:

  β€’ Creates and initializes a GRU layer. The layer is built out of six connected sub-layers: uz, ur, uh (input path) and wz, wr, wh (state path).

Description:

  β€’ The batch value passed in is divided by steps, giving the per-time-step batch size.

  β€’ The layer type is set to GRU.

  β€’ The connected sub-layers uz, wz, ur, wr, uh, and wh are created and initialized; each is constructed with batch*steps rows and then has its batch field set to the per-step batch size.

  β€’ The output, delta, state, prev_state, forgot_state, forgot_delta, r_cpu, z_cpu, and h_cpu buffers are allocated and zero-initialized.

  β€’ The forward, backward, and update function pointers are set.

  β€’ The initialized GRU layer is returned; a usage sketch follows below.
