wget ftp://ftp.cwru.edu/pub/bash/readline-6.3.tar.gz
wget http://download.zeromq.org/zeromq-4.0.3.tar.gz
tar xfz zeromq-4.0.3.tar.gz
cd zeromq-4.0.3/
./configure --prefix=~/local
make install
# Install zmq library
#-------------------
export LD_LIBRARY_PATH=~/local/lib/
export PKG_CONFIG_PATH=~/local/lib/pkgconfig/
export C_INCLUDE_PATH=$C_INCLUDE_PATH:~/usr/include
export CPLUS_INCLUDE_PATH=$CPLUS_INCLUDE_PATH:~/usr/include

When building packages, use ./configure --prefix=/home/<username>/usr
# In some OSes you may also need this to fix "header file not found" errors:
export CXXFLAGS="-I ~/local/include"
u/torch/install/bin/luajit: syev : Lapack library not found in compile time at /tmp/luarocks_torch-scm-1-5326/torch7/lib/TH/generic/THLapack.c:68
stack traceback:
    [C]: at 0x7ff39fdc7930
    [C]: in function 'symeig'
    /home/ee532_stu/cifar10_handsome/lib/preprocessing.lua:16: in function 'pcacov'
    /home/ee532_stu/cifar10_handsome/lib/preprocessing.lua:32: in function 'zca_whiten'
    /home/ee532_stu/cifar10_handsome/lib/preprocessing.lua:74: in function 'zca'
    /home/ee532_stu/cifar10_handsome/lib/preprocessing.lua:92: in function 'preprocessing'
    validate.lua:63: in function 'validation'
    validate.lua:85: in main chunk
    [C]: in function 'dofile'
    ..._stu/torch/install/lib/luarocks/rocks/trepl/scm-1/bin/th:131: in main chunk
    [C]: at 0x00406670
Solution: install OpenBLAS and LAPACK, then reinstall Torch.
INSTALL OPENBLAS:
make NO_LAPACK=0 USE_OPENMP=1
make PREFIX=/PATH/TO/OPENBLAS install
error:
OpenBLAS Warning : Detect OpenMP Loop and this application may hang. Please rebuild the library with USE_OPENMP=1 option.
# seen when running train.lua
solution: export OMP_NUM_THREADS=1
error:
Kaggle: size mismatch. CIFAR-10: cur_target less than 0.
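The "size mismatch" error usually means a layer's expected input dimensions don't match the tensor being fed in. The "cur_target less than 0" assertion comes from nn.ClassNLLCriterion, which expects class labels in the range 1..nClasses, so it typically means the labels were loaded 0-based (as in the raw CIFAR-10/Kaggle files). A minimal sketch of the fix, assuming a hypothetical trainData.labels tensor:

-- minimal sketch, assuming 0-based labels were loaded into trainData.labels (hypothetical name)
require 'torch'

local trainData = { labels = torch.Tensor{0, 3, 9, 1} }  -- toy 0-based labels
if trainData.labels:min() == 0 then
    trainData.labels:add(1)  -- shift to 1..nClasses, as nn.ClassNLLCriterion expects
end
print(trainData.labels)      -- 1, 4, 10, 2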
torch syntax
-- for a torch.Tensor: a:size()
-- nn.Sequential has type <table>; a = model.modules[1]  -- grab the first module if the model contains more than one
mlp = nn.Sequential()      -- Create a network that takes a Tensor as input
mlp:add(nn.SplitTable(2))
c = nn.ParallelTable()     -- The two Tensors go through two different Linear
c:add(nn.Linear(10,3))     -- Layers in Parallel
c:add(nn.Linear(10,7))
mlp:add(c)                 -- Outputting a table with 2 elements
p = nn.ParallelTable()     -- These tables go through two more linear layers
p:add(nn.Linear(3,2))      -- separately.
p:add(nn.Linear(7,1))
mlp:add(p)
mlp:add(nn.JoinTable(1))   -- Finally, the tables are joined together and output.
th> mlp:forward(torch.Tensor(10,2))
-0.3382
 0.3077
-0.1244
[torch.DoubleTensor of size 3]
-- flattened gradParameters: gradients w.r.t. the bias and weights
2.6499e+180
4.9592e+92
1.7894e+161
1.2806e+213
[torch.DoubleTensor of size 4]
[0.0017s]
th> linear.output
 0.7441
[torch.DoubleTensor of size 1]
Tensor: getting rows and columns
input = torch.Tensor(8,20)  -- 8 rows, 20 cols
input[1]                    -- return the first row
input[{{}, 5}]              -- return the 5th col
input[{{}, {4,8}}]          -- return cols 4,5,6,7,8
import csv
import csv

text_file = open("label.txt", "w")
with open('label.csv', 'rb') as csvfile:
    reader = csv.reader(csvfile)
    i = 1
    for row in reader:
        name = 'imgs/train{:05d}.jpg'.format(i)
        label = row[0]
        text_file.writelines('%s %s \n' % (name, label))
        i = i + 1
text_file.close()
minibatch SGD
Cost function for training and testing; parameters updated by gradient descent (see the sketch below).
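A minimal sketch of a minibatch SGD training loop in Torch7. The toy model, dataset, and hyperparameters are assumptions for illustration, not the notes' actual training code:

-- minimal minibatch SGD sketch; model, data, and hyperparameters are toy values
require 'nn'

local model = nn.Sequential()
model:add(nn.Linear(10, 2))
model:add(nn.LogSoftMax())
local criterion = nn.ClassNLLCriterion()

local trainData = {
    data   = torch.randn(64, 10),            -- toy inputs
    labels = torch.Tensor(64):random(1, 2),  -- toy 1-based class labels
}

local learningRate = 0.01
local batchSize = 16
local nSamples = trainData.data:size(1)

for t = 1, nSamples, batchSize do
    local last    = math.min(t + batchSize - 1, nSamples)
    local inputs  = trainData.data[{{t, last}}]    -- minibatch of inputs
    local targets = trainData.labels[{{t, last}}]  -- matching labels

    model:zeroGradParameters()
    local outputs = model:forward(inputs)
    local loss = criterion:forward(outputs, targets)         -- training cost on this minibatch
    local gradOutputs = criterion:backward(outputs, targets)
    model:backward(inputs, gradOutputs)
    model:updateParameters(learningRate)                      -- plain gradient-descent update
end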
local function split(str, sep)
    sep = sep or ','
    local fields = {}
    local matchfunc = string.gmatch(str, "([^" .. sep .. "]+)")
    if not matchfunc then return {str} end
    for str in matchfunc do
        table.insert(fields, str)
    end
    return fields
end
---------------------------------------------------------------------
function read(path, sep, tonum)
    tonum = tonum or true
    sep = sep or ','
    local csvFile = {}
    local file = assert(io.open(path, "r"))
    for line in file:lines() do
        local fields = split(line, sep)
        if tonum then  -- convert numeric fields to numbers
            for i = 1, #fields do
                fields[i] = tonumber(fields[i]) or fields[i]
            end
        end
        table.insert(csvFile, fields)
    end
    file:close()
    return csvFile
end
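A hypothetical usage of the two helpers above, assuming a comma-separated label.csv sits in the working directory:

-- hypothetical usage of split/read; assumes label.csv exists
local rows = read('label.csv', ',', true)
print(#rows)        -- number of lines in the file
print(rows[1][1])   -- first field of the first line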