DL之RNN: AI Writes Code for You - Generating Programming-Language Code (C) with an RNN in TensorFlow, with a Full Record of the Training & Testing Process
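The generator is a character-level RNN (char-RNN): trained on Linux kernel C source, it reads the corpus one character at a time and learns to predict the next character, so sampling from it character by character produces the C-like output shown in the tests below. What follows is a minimal sketch of such a graph, assuming TensorFlow 1.x; the layer sizes, dropout rate, and other hyperparameters are illustrative placeholders, not the exact values used for this run.

import tensorflow as tf

# Illustrative hyperparameters (placeholders, not the values of this run)
batch_size, num_steps = 100, 100        # sequences per batch, characters per sequence
lstm_size, num_layers = 512, 2
vocab_size = 128                        # assumed size of the character vocabulary
learning_rate, grad_clip = 0.001, 5.0

inputs  = tf.placeholder(tf.int32, [batch_size, num_steps], name='inputs')
targets = tf.placeholder(tf.int32, [batch_size, num_steps], name='targets')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')

# Stacked LSTM with dropout between layers
def make_cell():
    cell = tf.nn.rnn_cell.BasicLSTMCell(lstm_size)
    return tf.nn.rnn_cell.DropoutWrapper(cell, output_keep_prob=keep_prob)

cell = tf.nn.rnn_cell.MultiRNNCell([make_cell() for _ in range(num_layers)])
initial_state = cell.zero_state(batch_size, tf.float32)

# One-hot encode the character IDs and run them through the recurrent stack
x_one_hot = tf.one_hot(inputs, vocab_size)
outputs, final_state = tf.nn.dynamic_rnn(cell, x_one_hot, initial_state=initial_state)

# Project every time step to per-character logits; train on next-character cross-entropy
logits = tf.layers.dense(tf.reshape(outputs, [-1, lstm_size]), vocab_size)
probs = tf.nn.softmax(logits, name='predictions')
y_one_hot = tf.reshape(tf.one_hot(targets, vocab_size), [-1, vocab_size])
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot, logits=logits))

# Clip gradients to keep training stable, then apply Adam
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), grad_clip)
optimizer = tf.train.AdamOptimizer(learning_rate).apply_gradients(zip(grads, tvars))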
Output Results
1. test01
,&&curses = 0;
if (tsk->state_perf_event_set && id_state_init == 1)
return 0;
if (!still &= is_stop_init(trace, context) {
if (seq_context_trace_set_torture)
prev(size);
pr_cont(&set);
if (event->pid == 0)
goto out;
return ret;
return 0;
}
if (enabled == 0)
ster_state = sched(ts, &sec_stack, task->tos);
}
static void sig_call(struct cpu_state *tr,
struct rcu_state *size)
{
int find_task;
int cpu = 0;
unsigned long free;
set_timerective(task_parent_tasks);
if (!perf_points == NULL)
return;
seq_printf(s, " %ns\n", str, signal, flags);
pid = ret;
}
static unsigned long table_read_id_chorch_set_calc(struct trace_policy *rsp)
{
struct ftrace_event *init = task_sched_cpu();
struct trace *ret = &ret;
struct trace_iterator *trace_read_interval;
sched = cpu_post_context(cpu);
return -ENOMEM;
}
/**
* time_context_tasks: info as an set aling
* @pron_set on this caller the at of this.
*
* Which ops set size that commitime is all can a is alloc
2. test02
.timespec */
ring_buffer_init(resoulc->system_trace_context);
size_of != cpu_cpu_stack;
sprevert_return_init(&task_tail_timer, file);
return ret;
}
static void statist_child_signal(struct task_struct *case,
s instats_start *size,
struct ping *timer, char *stat)
{
return init(&struct state + size, state->timeolt, str, cpu) = rq->to_count_compole_print_task_caller(cpu);
}
static int sched_copy_praid_irq(struct spin_lock *str, struct rq *rq->seq)
{
if (task_const int)
return;
return;
static void rcu_boost_real(struct task_struct *stat, struct ftrace_event_call *trace)
{
if (!page->temp_pid(&ts->class == 0) & TASK_MONTED; i < stop_pool(current, ftrace_console_file, trace_task_furcs)
cred->flags & CANDER | 1
3. test03
. start, arg state task
* an the struct strings sigre the clear that and the secs. */
return 0;
}
/*
* Rincend time a current the arched on the for and the time
* store the task tracer an a to in the set_tryset is interrupted
* aling are a set the task trigger already ticks to call. If call. The curr the check to string of
* instatt is in a are the file set some try instructy it strentity of
* is it as that the to son to inticate the state on the states at
*/
static int seq_buf_call(sched_lock)
{
struct cpu *completion;
/* The this stored the current allocation the tracer */
if (timeore-- != 1)
return 0;
raw_buffer_state = true;
context->trace_seq_puttour_spres(size,
seq->free_stres) {
if (raw_int_risk(®s) ||
struct class_cpu())
case TRACE_STACK_PONRINLED(tracing_time_clocks);
}
if (ret)
count = set_test_size();
trace_buffer_clear_restart(&call->filter);
init_stop(timer, system, flags);
}
static int proc_system_trac
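The three test outputs above come from sampling the trained model one character at a time; a common way to keep the text coherent is to restrict each draw to the top-n most probable characters. Below is a hedged sketch of such a sampling loop, not the exact script used here: it assumes a batch-size-1 copy of the graph sketched at the top of the post (tensors inputs, probs, initial_state, final_state, keep_prob) and the char_to_id / id_to_char mappings built in the dataset section at the end.

import numpy as np

def pick_top_n(preds, vocab_size, top_n=5):
    """Sample the next character ID from the top-n most probable predictions."""
    p = np.squeeze(preds)
    p[np.argsort(p)[:-top_n]] = 0      # zero out everything outside the top-n
    p = p / np.sum(p)                  # renormalise what is left
    return np.random.choice(vocab_size, 1, p=p)[0]

def sample(sess, prime, n_chars, char_to_id, id_to_char, vocab_size):
    """Warm up the state with a priming string, then generate n_chars new characters."""
    generated = list(prime)
    new_state = sess.run(initial_state)
    x = np.zeros((1, 1), dtype=np.int32)
    for ch in prime:                                   # feed the prime one character at a time
        x[0, 0] = char_to_id[ch]
        feed = {inputs: x, keep_prob: 1.0, initial_state: new_state}
        preds, new_state = sess.run([probs, final_state], feed_dict=feed)
    c = pick_top_n(preds, vocab_size)
    generated.append(id_to_char[c])
    for _ in range(n_chars):                           # feed each sampled character back in
        x[0, 0] = c
        feed = {inputs: x, keep_prob: 1.0, initial_state: new_state}
        preds, new_state = sess.run([probs, final_state], feed_dict=feed)
        c = pick_top_n(preds, vocab_size)
        generated.append(id_to_char[c])
    return ''.join(generated)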
Monitoring the Model
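In this run the model is monitored simply by printing the loss and the time per batch every 10 steps, as recorded in the log below. If graphical monitoring is preferred, a TensorBoard scalar can be attached to the same loss tensor; this is an optional sketch on top of the graph above, not something the original run necessarily did.

# Optional: export the training loss as a TensorBoard scalar
tf.summary.scalar('train_loss', loss)
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('logs/char_rnn', tf.get_default_graph())
# inside the training loop (see the sketch after the log below):
#     summary_val, batch_loss, new_state, _ = sess.run(
#         [merged, loss, final_state, optimizer], feed_dict=feed)
#     writer.add_summary(summary_val, step)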
Full Record of the Training & Testing Process
1. Training Process
2018-10-13 18:47:32.811423:
step: 10/20000... loss: 3.7245... 0.4903 sec/batch
step: 20/20000... loss: 3.6450... 0.4442 sec/batch
step: 30/20000... loss: 3.8607... 0.4482 sec/batch
step: 40/20000... loss: 3.5700... 0.4993 sec/batch
step: 50/20000... loss: 3.5019... 0.5444 sec/batch
step: 60/20000... loss: 3.4688... 0.4512 sec/batch
step: 70/20000... loss: 3.3954... 0.5244 sec/batch
step: 80/20000... loss: 3.3532... 0.4031 sec/batch
step: 90/20000... loss: 3.2842... 0.6517 sec/batch
step: 100/20000... loss: 3.1934... 0.4893 sec/batch
……
step: 990/20000... loss: 2.0868... 0.4111 sec/batch
step: 1000/20000... loss: 2.0786... 0.4001 sec/batch
step: 1010/20000... loss: 2.0844... 0.4352 sec/batch
step: 1020/20000... loss: 2.1136... 0.4402 sec/batch
step: 1030/20000... loss: 2.1023... 0.3199 sec/batch
step: 1040/20000... loss: 2.0460... 0.4522 sec/batch
step: 1050/20000... loss: 2.1545... 0.4432 sec/batch
step: 1060/20000... loss: 2.1058... 0.3680 sec/batch
step: 1070/20000... loss: 2.0850... 0.4201 sec/batch
step: 1080/20000... loss: 2.0811... 0.4682 sec/batch
step: 1090/20000... loss: 2.0438... 0.4301 sec/batch
step: 1100/20000... loss: 2.0640... 0.4462 sec/batch
……
step: 1990/20000... loss: 1.9070... 0.4442 sec/batch
step: 2000/20000... loss: 1.8854... 0.3920 sec/batch
……
step: 4000/20000... loss: 1.7408... 0.4612 sec/batch
step: 4010/20000... loss: 1.8354... 0.4402 sec/batch
step: 4020/20000... loss: 1.8101... 0.3951 sec/batch
step: 4030/20000... loss: 1.8578... 0.4422 sec/batch
step: 4040/20000... loss: 1.7468... 0.3770 sec/batch
step: 4050/20000... loss: 1.8008... 0.4301 sec/batch
step: 4060/20000... loss: 1.9093... 0.3650 sec/batch
step: 4070/20000... loss: 1.8889... 0.4582 sec/batch
step: 4080/20000... loss: 1.8673... 0.4682 sec/batch
step: 4090/20000... loss: 1.7999... 0.3951 sec/batch
step: 4100/20000... loss: 1.7484... 0.4582 sec/batch
step: 4110/20000... loss: 1.7629... 0.4071 sec/batch
step: 4120/20000... loss: 1.6727... 0.3940 sec/batch
step: 4130/20000... loss: 1.7895... 0.3750 sec/batch
step: 4140/20000... loss: 1.8002... 0.3860 sec/batch
step: 4150/20000... loss: 1.7922... 0.4532 sec/batch
step: 4160/20000... loss: 1.7259... 0.3951 sec/batch
step: 4170/20000... loss: 1.7123... 0.4642 sec/batch
step: 4180/20000... loss: 1.7262... 0.3760 sec/batch
step: 4190/20000... loss: 1.8545... 0.3910 sec/batch
step: 4200/20000... loss: 1.8221... 0.3539 sec/batch
step: 4210/20000... loss: 1.8693... 0.4472 sec/batch
step: 4220/20000... loss: 1.8502... 0.4793 sec/batch
step: 4230/20000... loss: 1.7788... 0.3539 sec/batch
step: 4240/20000... loss: 1.8240... 0.4793 sec/batch
step: 4250/20000... loss: 1.7947... 0.4101 sec/batch
step: 4260/20000... loss: 1.8094... 0.3630 sec/batch
step: 4270/20000... loss: 1.7775... 0.4021 sec/batch
step: 4280/20000... loss: 1.8868... 0.3950 sec/batch
step: 4290/20000... loss: 1.7982... 0.4532 sec/batch
step: 4300/20000... loss: 1.8579... 0.3359 sec/batch
step: 4310/20000... loss: 1.7709... 0.4412 sec/batch
step: 4320/20000... loss: 1.7422... 0.4011 sec/batch
step: 4330/20000... loss: 1.7841... 0.5775 sec/batch
step: 4340/20000... loss: 1.7253... 0.4532 sec/batch
step: 4350/20000... loss: 1.8973... 0.3479 sec/batch
step: 4360/20000... loss: 1.7462... 0.3680 sec/batch
step: 4370/20000... loss: 1.8291... 0.5204 sec/batch
step: 4380/20000... loss: 1.7276... 0.3930 sec/batch
step: 4390/20000... loss: 1.7404... 0.3289 sec/batch
step: 4400/20000... loss: 1.6993... 0.4462 sec/batch
step: 4410/20000... loss: 1.8670... 0.3920 sec/batch
step: 4420/20000... loss: 1.8217... 0.4301 sec/batch
step: 4430/20000... loss: 1.8339... 0.5164 sec/batch
step: 4440/20000... loss: 1.7154... 0.3660 sec/batch
step: 4450/20000... loss: 1.8485... 0.3920 sec/batch
step: 4460/20000... loss: 1.7758... 0.4161 sec/batch
step: 4470/20000... loss: 1.7017... 0.5234 sec/batch
step: 4480/20000... loss: 1.6939... 0.3379 sec/batch
step: 4490/20000... loss: 1.7715... 0.3951 sec/batch
step: 4500/20000... loss: 1.7940... 0.4492 sec/batch
step: 4510/20000... loss: 1.7804... 0.3740 sec/batch
step: 4520/20000... loss: 1.7876... 0.5073 sec/batch
step: 4530/20000... loss: 1.7149... 0.5825 sec/batch
step: 4540/20000... loss: 1.7723... 0.3961 sec/batch
step: 4550/20000... loss: 1.8180... 0.4271 sec/batch
step: 4560/20000... loss: 1.7757... 0.4933 sec/batch
step: 4570/20000... loss: 1.8858... 0.3309 sec/batch
step: 4580/20000... loss: 1.7332... 0.3890 sec/batch
step: 4590/20000... loss: 1.8466... 0.4251 sec/batch
step: 4600/20000... loss: 1.8532... 0.3930 sec/batch
step: 4610/20000... loss: 1.8826... 0.3850 sec/batch
step: 4620/20000... loss: 1.8447... 0.3359 sec/batch
step: 4630/20000... loss: 1.7697... 0.4221 sec/batch
step: 4640/20000... loss: 1.9220... 0.3549 sec/batch
step: 4650/20000... loss: 1.7555... 0.4011 sec/batch
step: 4660/20000... loss: 1.8541... 0.3830 sec/batch
step: 4670/20000... loss: 1.8676... 0.4181 sec/batch
step: 4680/20000... loss: 1.9653... 0.3600 sec/batch
step: 4690/20000... loss: 1.8377... 0.3981 sec/batch
step: 4700/20000... loss: 1.7620... 0.4291 sec/batch
step: 4710/20000... loss: 1.7802... 0.4251 sec/batch
step: 4720/20000... loss: 1.7495... 0.4131 sec/batch
step: 4730/20000... loss: 1.7338... 0.3299 sec/batch
step: 4740/20000... loss: 1.9160... 0.4662 sec/batch
step: 4750/20000... loss: 1.8142... 0.3389 sec/batch
step: 4760/20000... loss: 1.8162... 0.3680 sec/batch
step: 4770/20000... loss: 1.8710... 0.4552 sec/batch
step: 4780/20000... loss: 1.8923... 0.4321 sec/batch
step: 4790/20000... loss: 1.8062... 0.4061 sec/batch
step: 4800/20000... loss: 1.8175... 0.4342 sec/batch
step: 4810/20000... loss: 1.9355... 0.3459 sec/batch
step: 4820/20000... loss: 1.7608... 0.4191 sec/batch
step: 4830/20000... loss: 1.8031... 0.3991 sec/batch
step: 4840/20000... loss: 1.9261... 0.4472 sec/batch
step: 4850/20000... loss: 1.7129... 0.3981 sec/batch
step: 4860/20000... loss: 1.7748... 0.4642 sec/batch
step: 4870/20000... loss: 1.8557... 0.4221 sec/batch
step: 4880/20000... loss: 1.7181... 0.4452 sec/batch
step: 4890/20000... loss: 1.7657... 0.5134 sec/batch
step: 4900/20000... loss: 1.8971... 0.4813 sec/batch
step: 4910/20000... loss: 1.7947... 0.3670 sec/batch
step: 4920/20000... loss: 1.7647... 0.4362 sec/batch
step: 4930/20000... loss: 1.7945... 0.3509 sec/batch
step: 4940/20000... loss: 1.7773... 0.4342 sec/batch
step: 4950/20000... loss: 1.7854... 0.4121 sec/batch
step: 4960/20000... loss: 1.7883... 0.3720 sec/batch
step: 4970/20000... loss: 1.7483... 0.3700 sec/batch
step: 4980/20000... loss: 1.8686... 0.5645 sec/batch
step: 4990/20000... loss: 1.8472... 0.2075 sec/batch
step: 5000/20000... loss: 1.8808... 0.1955 sec/batch
……
step: 9990/20000... loss: 1.7760... 0.2306 sec/batch
step: 10000/20000... loss: 1.6906... 0.2256 sec/batch
……
step: 19800/20000... loss: 1.5745... 0.2657 sec/batch
step: 19810/20000... loss: 1.7075... 0.2326 sec/batch
step: 19820/20000... loss: 1.5854... 0.3660 sec/batch
step: 19830/20000... loss: 1.6520... 0.3529 sec/batch
step: 19840/20000... loss: 1.6153... 0.3434 sec/batch
step: 19850/20000... loss: 1.6174... 0.3063 sec/batch
step: 19860/20000... loss: 1.6060... 0.2717 sec/batch
step: 19870/20000... loss: 1.5775... 0.2627 sec/batch
step: 19880/20000... loss: 1.6181... 0.2326 sec/batch
step: 19890/20000... loss: 1.5117... 0.2547 sec/batch
step: 19900/20000... loss: 1.5613... 0.2356 sec/batch
step: 19910/20000... loss: 1.6465... 0.2346 sec/batch
step: 19920/20000... loss: 1.5160... 0.2607 sec/batch
step: 19930/20000... loss: 1.6922... 0.2306 sec/batch
step: 19940/20000... loss: 1.8708... 0.2527 sec/batch
step: 19950/20000... loss: 1.5579... 0.2276 sec/batch
step: 19960/20000... loss: 1.5850... 0.2376 sec/batch
step: 19970/20000... loss: 1.6798... 0.2286 sec/batch
step: 19980/20000... loss: 1.5684... 0.2667 sec/batch
step: 19990/20000... loss: 1.4981... 0.2617 sec/batch
step: 20000/20000... loss: 1.5322... 0.3199 sec/batch
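The log above is what a standard char-RNN training loop prints: feed one batch at a time, carry the LSTM state from batch to batch, and report the loss and the seconds per batch every 10 steps. A hedged sketch of such a loop follows; it assumes the tensors from the model sketch at the top of the post, a get_batches generator like the one sketched in the dataset section below, and a placeholder checkpoint path.

import os
import time

max_steps, save_every = 20000, 2000
os.makedirs('checkpoints', exist_ok=True)           # placeholder checkpoint directory
saver = tf.train.Saver(max_to_keep=10)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 0
    while step < max_steps:
        new_state = sess.run(initial_state)         # reset the LSTM state each pass over the corpus
        for x, y in get_batches(encoded_text, batch_size, num_steps):
            step += 1
            start = time.time()
            feed = {inputs: x, targets: y, keep_prob: 0.5, initial_state: new_state}
            batch_loss, new_state, _ = sess.run([loss, final_state, optimizer], feed_dict=feed)
            if step % 10 == 0:                      # produces lines like the log above
                print('step: {}/{}... loss: {:.4f}... {:.4f} sec/batch'.format(
                    step, max_steps, batch_loss, time.time() - start))
            if step % save_every == 0 or step == max_steps:
                saver.save(sess, 'checkpoints/i{}_l{}.ckpt'.format(step, lstm_size))
            if step >= max_steps:
                break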
A Look at the Training Dataset
1. Dataset sample
/**
* context_tracking_enter - Inform the context tracking that the CPU is going
* enter user or guest space mode.
*
* This function must be called right before we switch from the kernel
* to user or guest space, when it's guaranteed the remaining kernel
* instructions to execute won't use any RCU read side critical section
* because this function sets RCU in extended quiescent state.
*/
void context_tracking_enter(enum ctx_state state)
{
unsigned long flags;
/*
* Repeat the user_enter() check here because some archs may be calling
* this from asm and if no CPU needs context tracking, they shouldn't
* go further. Repeat the check here until they support the inline static
* key check.
*/
if (!context_tracking_is_enabled())
return;
/*
* Some contexts may involve an exception occuring in an irq,
* leading to that nesting:
* rcu_irq_enter() rcu_user_exit() rcu_user_exit() rcu_irq_exit()
* This would mess up the dyntick_nesting count though. And rcu_irq_*()
* helpers are enough to protect RCU uses inside the exception. So
* just return immediately if we detect we are in an IRQ.
*/
if (in_interrupt())
return;
/* Kernel threads aren't supposed to go to userspace */
WARN_ON_ONCE(!current->mm);
local_irq_save(flags);
if ( __this_cpu_read(context_tracking.state) != state) {
if (__this_cpu_read(context_tracking.active)) {
/*
* At this stage, only low level arch entry code remains and
* then we'll run in userspace. We can assume there won't be
* any RCU read-side critical section until the next call to
* user_exit() or rcu_irq_enter(). Let's remove RCU's dependency
* on the tick.
*/
if (state == CONTEXT_USER) {
trace_user_enter(0);
vtime_user_enter(current);
}
rcu_user_enter();
}
/*
* Even if context tracking is disabled on this CPU, because it's outside
* the full dynticks mask for example, we still have to keep track of the
* context transitions and states to prevent inconsistency on those of
* other CPUs.
* If a task triggers an exception in userspace, sleep on the exception
* handler and then migrate to another CPU, that new CPU must know where
* the exception returns by the time we call exception_exit().
* This information can only be provided by the previous CPU when it called
* exception_enter().
* OTOH we can spare the calls to vtime and RCU when context_tracking.active
* is false because we know that CPU is not tickless.
*/
__this_cpu_write(context_tracking.state, state);
}
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_enter);
EXPORT_SYMBOL_GPL(context_tracking_enter);
void context_tracking_user_enter(void)
{
context_tracking_enter(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_enter);
/**
* context_tracking_exit - Inform the context tracking that the CPU is
* exiting user or guest mode and entering the kernel.
*
* This function must be called after we entered the kernel from user or
* guest space before any use of RCU read side critical section. This
* potentially include any high level kernel code like syscalls, exceptions,
* signal handling, etc...
*
* This call supports re-entrancy. This way it can be called from any exception
* handler without needing to know if we came from userspace or not.
*/
void context_tracking_exit(enum ctx_state state)
{
unsigned long flags;
if (!context_tracking_is_enabled())
return;
if (in_interrupt())
return;
local_irq_save(flags);
if (__this_cpu_read(context_tracking.state) == state) {
if (__this_cpu_read(context_tracking.active)) {
/*
* We are going to run code that may use RCU. Inform
* RCU core about that (ie: we may need the tick again).
*/
rcu_user_exit();
if (state == CONTEXT_USER) {
vtime_user_exit(current);
trace_user_exit(0);
}
}
__this_cpu_write(context_tracking.state, CONTEXT_KERNEL);
}
local_irq_restore(flags);
}
NOKPROBE_SYMBOL(context_tracking_exit);
EXPORT_SYMBOL_GPL(context_tracking_exit);
void context_tracking_user_exit(void)
{
context_tracking_exit(CONTEXT_USER);
}
NOKPROBE_SYMBOL(context_tracking_user_exit);
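The excerpt above (essentially the kernel's context_tracking.c) is representative of the raw training corpus. Turning such source text into training batches only requires a character vocabulary, an integer encoding, and targets that are the inputs shifted by one character. A minimal sketch follows, with the corpus file name as a placeholder; encoded_text, char_to_id, and id_to_char are the objects referenced by the training and sampling sketches earlier in the post.

import numpy as np

# Read the concatenated kernel .c files that make up the corpus
with open('kernel_source.txt', 'r') as f:           # placeholder corpus file name
    text = f.read()

# Character-level vocabulary and integer encoding
vocab = sorted(set(text))
char_to_id = {c: i for i, c in enumerate(vocab)}
id_to_char = dict(enumerate(vocab))
encoded_text = np.array([char_to_id[c] for c in text], dtype=np.int32)

def get_batches(arr, batch_size, num_steps):
    """Yield (input, target) batches; targets are the inputs shifted left by one character."""
    chars_per_batch = batch_size * num_steps
    n_batches = len(arr) // chars_per_batch
    arr = arr[:n_batches * chars_per_batch].reshape((batch_size, -1))
    for n in range(0, arr.shape[1], num_steps):
        x = arr[:, n:n + num_steps]
        y = np.zeros_like(x)
        y[:, :-1], y[:, -1] = x[:, 1:], x[:, 0]      # wrap the final target around
        yield x, y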