
kvx -> kvs INITIAL IMPORT

Namdak Tonpa 6 years ago
parent
commit
5d289e071d

+ 5 - 0
.formatter.exs

@@ -0,0 +1,5 @@
+# Used by "mix format"
+[
+  inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"],
+  line_length: 133
+]

+ 5 - 0
.gitignore

@@ -0,0 +1,5 @@
+ebin
+*.dets
+*.gz
+*.dump
+.applist

+ 8 - 0
.travis.yml

@@ -0,0 +1,8 @@
+language: erlang
+otp_release:
+  - 19.3
+  - 20.0
+script:
+  - "curl -fsSL https://raw.github.com/synrc/mad/master/mad > mad"
+  - "chmod +x mad"
+  - "./mad dep com"

+ 1 - 0
CNAME

@@ -0,0 +1 @@
+kvs.n2o.space

+ 15 - 0
LICENSE

@@ -0,0 +1,15 @@
+Copyright (c) 2014-2018 Maxim Sokhatsky <maxim@synrc.com>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and following permission notice appear in all copies:
+
+Software may only be used for the great good and the true happiness of all sentient beings.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

+ 36 - 0
README.md

@@ -0,0 +1,36 @@
+KVX: Abstract Chain Database
+============================
+[![Build Status](https://travis-ci.org/synrc/kvx.svg?branch=master)](https://travis-ci.org/synrc/kvx)
+
+Features
+--------
+
+* Polymorphic Tuples aka Extensible Records
+* Basic Schema for Storing Chains
+* Backends: MNESIA, FS, ROCKSDB
+* Extremely Compact: 500 LOC
+
+Usage
+-----
+
+```
+$ git clone https://github.com/synrc/kvx && cd kvx
+$ open man/kvx.htm
+$ mad com pla rep
+> kvx:join().
+```
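+
+A minimal stream session sketch (module names follow the examples above; the `emails`
+tuple is only an illustration, any record registered in your schema works the same way):
+
+```
+> rr(kvx).
+> Id = {list,kvx:seq(writer,1)}.
+> kvx:save(kvx:writer(Id)).
+> kvx:save(kvx:add((kvx:writer(Id))#writer{args={emails,[],[],[],[]}})).
+> kvx:take((kvx:reader(Id))#reader{args=-1}).
+```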
+
+Release Notes
+-------------
+
+[1]. <a href="https://tonpa.guru/stream/2018/2018-11-13%20Новая%20версия%20KVX.htm">2018-11-13 New KVX Version 5.11</a><br>
+[2]. <a href="https://tonpa.guru/stream/2019/2019-04-13%20Новая%20версия%20KVX.htm">2019-04-13 New KVX Version 6.4</a>
+
+Credits
+-------
+
+* Maxim Sokhatsky
+* Vlad Ki
+* Andrii Zadorozhnii
+* Yuri Maslovsky
+* Igor Kharin

+ 6 - 0
config/config.exs

@@ -0,0 +1,6 @@
+use Mix.Config
+
+config :kvs,
+  dba: :kvs_rocks,
+  dba_st: :kvs_st,
+  schema: [:kvs, :kvs_stream]

+ 4 - 0
etc/fs.config

@@ -0,0 +1,4 @@
+[ {kvx,[{dba,kvx_fs},
+        {dba_st,kvx_stream},
+        {schema,[kvx,kvx_stream]}]}
+].

+ 4 - 0
etc/mnesia.config

@@ -0,0 +1,4 @@
+[ {kvx,[{dba,kvx_mnesia},
+        {dba_st,kvx_stream},
+        {schema,[kvx,kvx_stream]}]}
+].

+ 4 - 0
etc/rocks-lists.config

@@ -0,0 +1,4 @@
+[ {kvx,[{dba,kvx_rocks},
+        {dba_st,kvx_stream},
+        {schema,[kvx,kvx_stream]}]}
+].

+ 4 - 0
etc/rocks.config

@@ -0,0 +1,4 @@
+[ {kvx,[{dba,kvx_rocks},
+        {dba_st,kvx_st},
+        {schema,[kvx,kvx_stream]}]}
+].

+ 18 - 0
include/api.hrl

@@ -0,0 +1,18 @@
+-ifndef(API_HRL).
+-define(API_HRL, true).
+-define(API,[start/0,stop/0,leave/0,leave/1,
+             join/0,join/1,modules/0,cursors/0,get/2,get/3,put/1,put/2,index/3,delete/2,
+             table/1,tables/0,dir/0,initialize/2,seq/2,all/1,all/2,count/1,ver/0]).
+-include("metainfo.hrl").
+-spec seq(atom() | [], integer() | []) -> term().
+-spec count(atom()) -> integer().
+-spec dir() -> list({'table',atom()}).
+-spec ver() -> {'version',string()}.
+-spec leave() -> ok.
+-spec join() -> ok | {error,any()}.
+-spec join(Node :: string()) -> [{atom(),any()}].
+-spec modules() -> list(atom()).
+-spec cursors() -> list({atom(),list(atom())}).
+-spec tables() -> list(#table{}).
+-spec table(Tab :: atom()) -> #table{}.
+-endif.

+ 13 - 0
include/backend.hrl

@@ -0,0 +1,13 @@
+-ifndef(BACKEND_HRL).
+-define(BACKEND_HRL, true).
+-define(BACKEND, [get/2,put/1,delete/2,index/3,dump/0,start/0,stop/0,destroy/0,
+                  join/1,dir/0,create_table/2,add_table_index/2,seq/2,all/1,count/1,version/0]).
+-compile({no_auto_import,[get/1,put/2]}).
+-spec put(tuple() | list(tuple())) -> ok | {error,any()}.
+-spec get(atom() | any(), any()) -> {ok,any()} | {error,not_found}.
+-spec delete(atom(), any()) -> ok | {error,not_found}.
+-spec dump() -> ok.
+-spec start() -> ok.
+-spec stop() -> ok.
+-spec index(atom(), any(), any()) -> list(tuple()).
+-endif.

+ 15 - 0
include/cursors.hrl

@@ -0,0 +1,15 @@
+-ifndef(CURSORS_HRL).
+-define(CURSORS_HRL, true).
+
+-record(writer, { id    = [] :: [] | term(),
+                  count =  0 :: integer(),
+                  cache = [] :: [] | integer() | {term(),term()},
+                  args  = [] :: term(),
+                  first = [] :: [] | tuple() } ).
+-record(reader, { id    = [] :: [] | integer(),
+                  pos   =  0 :: integer(),
+                  cache = [] :: [] | integer() | {term(),term()},
+                  args  = [] :: term(),
+                  feed  = [] :: term(),
+                  dir   =  0 :: 0 | 1 } ).
+-endif.

+ 10 - 0
include/kvs.hrl

@@ -0,0 +1,10 @@
+-ifndef(KVX_HRL).
+-define(KVX_HRL, true).
+-record(id_seq, { thing = []::term(), id =  0 :: integer() } ).
+-record(it,     { id    = []::[] | integer() } ).
+-record(ite,    { id    = []::[] | integer(), next  = []::[] | integer() } ).
+-record(iter,   { id    = []::[] | integer(), next  = []::[] | integer(), prev  = []::[] | integer() } ).
+-record(kvs,    { mod   = kvs_mnesia :: kvs_mnesia | kvs_rocks | kvs_fs,
+                  st    = kvs_stream :: kvs_stream | kvs_st,
+                  cx    = []::term() }).
+-endif.

+ 6 - 0
include/metainfo.hrl

@@ -0,0 +1,6 @@
+-ifndef(METAINFO_HRL).
+-define(METAINFO_HRL, true).
+-record(schema, {name,tables=[]}).
+-record(table,  {name,container=false,type=set,fields=[],keys=[],
+                 copy_type=application:get_env(kvx,mnesia_media,disc_copies)}).
+-endif.

+ 20 - 0
include/stream.hrl

@@ -0,0 +1,20 @@
+-ifndef(STREAM_HRL).
+-define(STREAM_HRL, true).
+-include("kvs.hrl").
+-include("cursors.hrl").
+-define(STREAM, [top/1, bot/1, next/1, prev/1, drop/1, take/1, append/2, cut/2,
+                 load_reader/1, writer/1, reader/1, save/1, add/1]).
+-spec top(#reader{}) -> #reader{}.
+-spec bot(#reader{}) -> #reader{}.
+-spec next(#reader{}) -> #reader{} | {error,not_found | empty}.
+-spec prev(#reader{}) -> #reader{} | {error,not_found | empty}.
+-spec drop(#reader{}) -> #reader{}.
+-spec take(#reader{}) -> #reader{}.
+-spec load_reader (term()) -> #reader{}.
+-spec writer (term()) -> #writer{}.
+-spec reader (term()) -> #reader{}.
+-spec save (#reader{} | #writer{}) -> #reader{} | #writer{}.
+-spec add(#writer{}) -> #writer{}.
+-spec append(tuple(),term()) -> any().
+-spec cut(term(),term()) -> {ok,non_neg_integer()} | {error, not_found}.
+-endif.

+ 132 - 0
index.html

@@ -0,0 +1,132 @@
+<html>
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>KVS</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css?v=2" />
+</head>
+<body>
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvs.n2o.space' style="background:#ededed;">KVX</a>
+</nav>
+<header>
+    <a href="https://github.com/synrc/kvx"><img src="https://synrc.space/images/Synrc Neo.svg?v=1"></a>
+    <h1>KVS</h1>
+</header>
+<aside>
+    <article>
+        <section>
+            <h3>SYNOPSIS</h3>
+            <div>KVS is a lightweight client interface on top of BTREE database
+                 abstractions that strives to support the basic features: </div>
+            <div>
+                 <ul>
+                 <li>Polymorphic tuples aka extensible records;</li>
+                 <li>Basic schema for chain storage;</li>
+                 <li>Backends: MNESIA, FS, ROCKS;</li>
+                 <li>Extremely compact and battle-tested: 500 LOC.</li>
+                 </ul>
+            </div>
+            <div>This is the essence and fruit of KVX: an abstract term interface
+                 with a naive yet productive stream implementation. It is useful for simple
+                 blockchains, messaging, storage, processing systems, and the banking industry.
+                 KVX is used in the BPE and BANK applications.</div>
+        </section>
+        <section>
+            <h3>MODULES</h3>
+            <div><ul>
+           <li><a href="man/kvs.htm">KVS</a></font></li>
+           <li><a href="man/kvs_stream.htm">STREAM</a></font></li>
+           <li><a href="man/kvs_fs.htm">FS</a></font></li>
+           <li><a href="man/kvs_mnesia.htm">MNESIA</a></font></li>
+           <li><a href="man/kvs_rocks.htm">ROCKS</a></font></li>
+           <li><a href="man/kvs_st.htm">ST</a></font></li>
+            </ul></div>
+            <br>
+            <div>
+                APR 2019 &copy; <a href="https://github.com/5HT">5HT</a> ISC<br>
+                VER 6.6 6.5 6.4
+            </div>
+        </section>
+        <section>
+        <figure>
+        <code>
+ $ mad get kvx && cd kvx
+ $ mad com pla rep
+ > kvx:join().
+        </code>
+        </figure>
+        </section>
+    </article>
+</aside>
+<main>
+    <section>
+
+<h3>SESSION</h3>
+
+<figure><code>
+ > kvx:join().
+ ok
+
+ > kvx:check().
+ ok
+
+ > kvx:all(reader).
+ [{reader,1555175169121817000,0,[],[],
+          {list,1555175169120161000},
+          0},
+  {reader,1555175169121249000,0,[],[],
+          {list,1555175169120161000},
+          0}]
+
+ > rr(kvx).
+ [emails,id_seq,iter,kvx,reader,schema,table,writer]
+
+ > kvx:save(kvx:reader({list,1555175169120161000})).
+ #reader{id = 1555175244188986000,pos = 0,
+         cache = {emails,1555175169122304000},
+         args = [],
+         feed = {list,1555175169120161000},
+         dir = 0}
+
+ > kvx:take(kvx:bot((kvx:load_reader(1555175244188986000))#reader{args=-1})).
+ #reader{id = 1555175244188986000,pos = 5,
+         cache = {emails,1555175169127279000},
+         args = [#emails{id = 1555175169127279000,next = [],
+                         prev = 1555175169126314000,email = []},
+                 #emails{id = 1555175169126314000,next = 1555175169127279000,
+                         prev = 1555175169125227000,email = []},
+                 #emails{id = 1555175169125227000,next = 1555175169126314000,
+                         prev = 1555175169123405000,email = []},
+                 #emails{id = 1555175169123405000,next = 1555175169125227000,
+                         prev = 1555175169122304000,email = []},
+                 #emails{id = 1555175169122304000,next = 1555175169123405000,
+                         prev = [],email = []}],
+         feed = {list,1555175169120161000},
+         dir = 0}
+
+</code></figure>
+    </section>
+
+    <section>
+        <a name=plugin><h3>CONTRIBUTORS</h3></a>
+        <div>
+        <ul>
+           <li><a href="https://github.com/5HT">5HT</a> &mdash; Namdak Tonpa</li>
+           <li><a href="https://github.com/proger">proger</a> &mdash; Vlad Ki</li>
+           <li><a href="https://github.com/doxtop">doxtop</a> &mdash; Andrii Zadorozhnii</li>
+           <li><a href="https://github.com/cryoflamer">cryoflamer</a> &mdash; Yuri Maslovsky</li>
+           <li><a href="https://github.com/qomputer">qomputer</a> &mdash; Igor Kharin</li>
+        </ul></div>
+        <br><br>
+    </section>
+</main>
+<footer>
+    Made with <span class="heart">❤</span> to N2O
+</footer>
+</body>
+</html>

+ 15 - 0
lib/KVS.ex

@@ -0,0 +1,15 @@
+defmodule KVS do
+  require Record
+
+  Enum.each(Record.extract_all(from_lib: "kvs/include/cursors.hrl"), fn {name, definition} ->
+    Record.defrecord(name, definition)
+  end)
+
+  Enum.each(Record.extract_all(from_lib: "kvs/include/metainfo.hrl"), fn {name, definition} ->
+    Record.defrecord(name, definition)
+  end)
+
+  Enum.each(Record.extract_all(from_lib: "kvs/include/kvs.hrl"), fn {name, definition} ->
+    Record.defrecord(name, definition)
+  end)
+end

+ 158 - 0
man/kvs.htm

@@ -0,0 +1,158 @@
+<html>
+
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>KVS</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+
+<body>
+
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvx.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">KVS</a>
+</nav>
+
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>KVS</h1>
+</header>
+
+<main>
+    <section>
+
+<h3>INTRO</h3>
+
+   <p>The KVS module provides a user-level interface for console commands.
+      It offers discovery, data manipulation and retrieval features. Under the hood
+      it handles configurable run-time backends for each supported database.</p>
+
+        <p><blockquote><p><ul>
+            <li><b><a href="#dir">dir/0</a></b> &mdash; table list.</li>
+            <li><b><a href="#ver">ver/0</a></b> &mdash; KVS version.</li>
+            <li><b><a href="#seq">seq/2</a></b> &mdash; generate new sequence table id.</li>
+            <li><b><a href="#count">count/1</a></b> &mdash; counts records in table.</li>
+            <li><b><a href="#put">put/1</a></b> &mdash; put record using id as a key.</li>
+            <li><b><a href="#get">get/2</a></b> &mdash; get record by key from table.</li>
+            <li><b><a href="#delete">delete/1</a></b> &mdash; delete record from table.</li>
+            <li><b><a href="#index">index/3</a></b> &mdash; search records by field and its value.</li>
+        </ul></p></blockquote></p>
+
+ <p>You can change the backend by setting the application env.
+    This behaves well even under heavy load.</p>
+
+    </section>
+    <section>
+
+<h3>SETUP</h3>
+
+<p>In sys.config you should specify the kvx backend and a list of modules
+   containing an exported <b>metainfo/0</b> function.</p>
+
+<figure><code>
+  [{kvx, [{dba,kvx_mnesia},
+          {schema,[kvx,kvx_stream]}]}].
+
+</code></figure>
+
+<h4>dir() -> list({'table',atom()}).</h4>
+
+<p>Returns actual tables.</p>
+
+<h4>ver() -> {'version',string()}.</h4>
+
+<p>Returns backend version.</p>
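+
+<p>A sample session sketch; the exact table list and version string depend on the configured backend:</p>
+
+<figure><code>
+ > kvx:dir().
+ [{table,id_seq},{table,writer},{table,reader}]
+
+ > kvx:ver().
+ {version,"KVX MNESIA"}
+
+</code></figure>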
+
+<h4>dump() -> ok.</h4>
+
+<p>Displays database information.</p>
+
+<figure><code>
+ > kvx:dump().
+                NAME                     STORAGE TYPE    MEMORY (MB)   ELEMENTS
+
+              id_seq                      disc_copies           0.00          0
+              writer                      disc_copies           0.00          0
+              emails                      disc_copies           0.00          0
+              reader                      disc_copies           0.00          0
+
+ Snapshot taken: {{2018,11,10},{5,2,38}}
+ ok
+
+</code></figure>
+
+
+    </section>
+    <section>
+
+<h3>SEQ</h3>
+
+<p>The id_seq sequence table stores a counter per thing.
+   The counters are global and atomic in each supported database.
+   Sequences are used to generate unique names for records in each distributed table.
+   If names in a table are not unique, the count function may return
+   a value different from the current sequence.
+   </p>
+
+<figure><code>
+  -record(id_seq, { thing = [] :: term(),
+                    id    =  0 :: integer() } ).
+
+</code></figure>
+
+<h4>seq(atom(), integer()) -> integer().</h4>
+
+<p>Increments and returns id counter for the particular table.</p>
+
+<h4>count(atom()) -> integer().</h4>
+
+<p>Returns number of records in table.</p>
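+
+<p>A short usage sketch; the returned values depend on the backend and the current counter state,
+   and illustrate how the sequence can run ahead of count:</p>
+
+<figure><code>
+ > kvx:seq(writer,1).
+ 1
+
+ > kvx:seq(writer,1).
+ 2
+
+ > kvx:count(writer).
+ 0
+
+</code></figure>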
+
+    </section>
+    <section>
+
+<h3>BACKEND</h3>
+
+<p>Data operations.
+   </p>
+
+<h4>put(tuple()) -> ok | {error,any()}.</h4>
+
+<p>Stores a data record.</p>
+
+<h4>get(atom(),any()) -> {ok,any()} | {error, not_found | duplicated }.</h4>
+
+<p>Retrieves a data record.</p>
+
+<h4>delete(atom(),any()) -> ok | {error,any()}.</h4>
+
+<p>Deletes a data record.</p>
+
+<h4>index(atom(),any(),any()) -> list(tuple()).</h4>
+
+<p>Searches for data records by an indexed field and a given value.</p>
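+
+<p>A data manipulation sketch; the user table and its name field are hypothetical,
+   use any record declared in your metainfo:</p>
+
+<figure><code>
+ > kvx:put({user,1,"alice"}).
+ > kvx:get(user,1).
+ > kvx:index(user,name,"alice").
+ > kvx:delete(user,1).
+
+</code></figure>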
+
+    </section>
+    <section>
+
+<p>This module may refer to:
+<a href="kvx_fs.htm"><b>kvx_fs</b></a>,
+<a href="kvx_mnesia.htm"><b>kvx_mnesia</b></a>,
+<a href="kvx_rocks.htm"><b>kvx_rocks</b></a>,
+<a href="kvx_st.htm"><b>kvx_st</b></a>,
+<a href="kvx_stream.htm"><b>kvx_stream</b></a>.
+</p>
+    </section>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 46 - 0
man/kvs_fs.htm

@@ -0,0 +1,46 @@
+<html>
+
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>FS</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+
+<body>
+
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvx.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">FS</a>
+</nav>
+
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>FS</h1>
+</header>
+
+<main>
+   <section>
+   <h3>INTRO</h3>
+   <p>FS is a <b>filesystem</b> backend implementation for KVS.
+      Put the {dba,kvx_fs} property for the kvx application in your sys.config.</p>
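+   <p>A minimal sys.config sketch for this backend, mirroring etc/fs.config:</p>
+   <figure><code>
+  [ {kvx,[{dba,kvx_fs},
+          {dba_st,kvx_stream},
+          {schema,[kvx,kvx_stream]}]}
+  ].
+   </code></figure>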
+   <br>
+   </section>
+    <section>
+<p>This module may refer to:
+<a href="kvx.htm"><b>kvx</b></a>.
+</p>
+
+    </section>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 48 - 0
man/kvs_mnesia.htm

@@ -0,0 +1,48 @@
+<html>
+
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>MNESIA</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+
+<body>
+
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvx.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">MNESIA</a>
+</nav>
+
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>MNESIA</h1>
+</header>
+
+<main>
+   <article>
+   <section>
+   <h3>INTRO</h3>
+   <p>MNESIA is a <b>mnesia</b> backend implementation for KVS.
+      Put the {dba,kvx_mnesia} property for the kvx application in your sys.config.</p>
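+   <p>A minimal sys.config sketch for this backend, mirroring etc/mnesia.config:</p>
+   <figure><code>
+  [ {kvx,[{dba,kvx_mnesia},
+          {dba_st,kvx_stream},
+          {schema,[kvx,kvx_stream]}]}
+  ].
+   </code></figure>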
+   <br>
+   </section>
+    <section>
+<p>This module may refer to:
+<a href="kvx.htm"><b>kvx</b></a>.
+</p>
+
+    </section>
+   </article>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 51 - 0
man/kvs_rocks.htm

@@ -0,0 +1,51 @@
+<html>
+
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>ROCKS</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+
+<body>
+
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvs.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">ROCKS</a>
+</nav>
+
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>ROCKS</h1>
+</header>
+
+<main>
+   <article>
+   <section>
+   <h3>INTRO</h3>
+   <p>ROCKS is the <b>RocksDB</b> backend implementation for KVS.
+      Put the {dba,kvs_rocks} and (optionally) {dba_st,kvs_st} properties
+      for the kvs application in your sys.config.</p>
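+   <p>A minimal sys.config sketch for this backend, mirroring the repository's sys.config:</p>
+   <figure><code>
+  [ {kvs,[{dba,kvs_rocks},
+          {dba_st,kvs_st},
+          {schema,[kvs,kvs_stream]}]}
+  ].
+   </code></figure>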
+   <br>
+   </section>
+    <section>
+<p>This module may refer to:
+<a href="kvs_st.htm"><b>kvs_st</b></a>,
+<a href="kvs_stream.htm"><b>kvs_stream</b></a>,
+<a href="kvs.htm"><b>kvs</b></a>.
+</p>
+
+    </section>
+   </article>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 43 - 0
man/kvs_st.htm

@@ -0,0 +1,43 @@
+<html>
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>ST</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+<body>
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvs.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">ST</a>
+</nav>
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>ST</h1>
+</header>
+<main>
+    <section>
+
+<h3>INTRO</h3>
+
+<p>The ST module provides the STREAM interface for the ROCKS backend.
+   Set {dba,kvs_rocks} along with {dba_st,kvs_st} in order to use the ST module for stream operations.</p>
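+
+<p>A minimal sys.config sketch enabling ST on top of ROCKS, mirroring the repository's sys.config:</p>
+<figure><code>
+  [ {kvs,[{dba,kvs_rocks},
+          {dba_st,kvs_st},
+          {schema,[kvs,kvs_stream]}]}
+  ].
+</code></figure>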
+    </section>
+    <section>
+<p>This module may refer to:
+<a href="kvs_stream.htm"><b>kvs_stream</b></a>,
+<a href="kvs_rocks.htm"><b>kvs_rocks</b></a>,
+<a href="kvs.htm"><b>kvs</b></a>.
+</p>
+    </section>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 172 - 0
man/kvs_stream.htm

@@ -0,0 +1,172 @@
+<html>
+<head>
+    <meta charset="utf-8" />
+    <meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <meta name="description" content="" />
+    <meta name="author" content="Maxim Sokhatsky" />
+    <title>STREAM</title>
+    <link rel="stylesheet" href="https://synrc.space/synrc.css" />
+</head>
+<body>
+<nav>
+    <a href='https://n2o.dev'>DEV</a>
+    <a href='https://kvs.n2o.space'>KVS</a>
+    <a href='#' style="background:#ededed;">STREAM</a>
+</nav>
+<header>
+    <a href="../index.html"><img src="https://n2o.space/img/Synrc Neo.svg"></a>
+    <h1>STREAM</h1>
+</header>
+<main>
+    <section>
+
+<h3>INTRO</h3>
+
+<p>The STREAM module provides chain accumulation, traversal and persistence.
+   </p>
+
+        <p><blockquote><p><ul>
+            <li><b><a href="#writer">writer/1</a></b> &mdash; creates writer cursor to db.</li>
+            <li><b><a href="#reader">reader/1</a></b> &mdash; creates reader cursor to db.</li>
+            <li><b><a href="#save">save/1</a></b> &mdash; stores cursor to db.</li>
+            <li><b><a href="#load">load_reader/1</a></b> &mdash; loads reader cursor.</li>
+            <li><b><a href="#load">load_writer/1</a></b> &mdash; loads writer cursor.</li>
+            <li><b><a href="#top">top/1</a></b> &mdash; returns top of the chain.</li>
+            <li><b><a href="#bot">bot/1</a></b> &mdash; returns bottom of the chain.</li>
+            <li><b><a href="#next">next/1</a></b> &mdash; moves reader next.</li>
+            <li><b><a href="#prev">prev/1</a></b> &mdash; moves reader prev.</li>
+            <li><b><a href="#take">take/1</a></b> &mdash; takes N elements from reader.</li>
+            <li><b><a href="#drop">drop/1</a></b> &mdash; skips N elements from reader.</li>
+            <li><b><a href="#add">add/1</a></b> &mdash; adds element to list.</li>
+            <li><b><a href="#append">append/2</a></b> &mdash; adds element to feed.</li>
+            <li><b><a href="#append">cut/2</a></b> &mdash; cleanup feed by a given timestamp.</li>
+        </ul></p></blockquote></p>
+
+    <p>
+      You can grab <a style="margin-bottom:30px;"
+       href="https://raw.githubusercontent.com/synrc/kvs/master/src/kvs_stream.erl">kvs_stream</a>
+      and use it in your applications without importing the <b>synrc/kvs</b> dependency,
+      as this module is self-contained.
+      Possible applications include public and private feeds, FIFO queues,
+      unread messages, chat applications, blockchains, etc.</p><br><br>
+    </section>
+    <section>
+
+<h3>WRITER</h3>
+
+<p>The writer cursor represents an append-only list chain with some cached values:
+   the chain size, the first element of the chain, the cached value of the
+   previously written message, and a field for passing arguments to stream functions such as add.</p>
+
+<figure><code>
+  -record(writer, { id    = [] :: term(),
+                    count =  0 :: integer(),
+                    cache = [] :: [] | tuple(),
+                    args  = [] :: term(),
+                    first = [] :: [] | tuple() } ).
+
+</code></figure>
+
+<p>To add data to the database you first create a writer cursor,
+   set the args field with a record from the metainfo, and call the add and save functions.</p>
+
+<h4>writer(term()) -> #writer{}.</h4>
+
+<p>Creates writer cursor.</p>
+
+<h4>add(#writer{}) -> #writer{}.</h4>
+
+<p>Adds element to list declared by writer cursor.</p>
+
+<h4>load_writer(#writer{}) -> #writer{}.</h4>
+
+<p>Loads writer cursor.</p>
+
+<h4>save(#writer{}) -> #writer{}.</h4>
+
+<p>Flushes writer cursor to database.</p>
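+
+<p>A short usage sketch; the emails tuple is only an illustration, any record from your schema
+   fits the args field:</p>
+
+<figure><code>
+ > rr(kvx).
+ > Id = {list,kvx:seq(writer,1)}.
+ > kvx:save(kvx:add((kvx:writer(Id))#writer{args={emails,[],[],[],[]}})).
+
+</code></figure>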
+
+    </section>
+    <section>
+
+<h3>READER</h3>
+
+<figure><figcaption>Reader Cursor</figcaption><code>
+  -record(reader, { id    = [] :: [] | integer(),
+                    pos   =  0 :: integer(),
+                    cache = [] :: [] | integer() | {term(),term()},
+                    args  = [] :: term(),
+                    feed  = [] :: term(),
+                    dir   =  0 :: 0 | 1 } ).
+
+</code></figure>
+
+<h4>reader(term()) -> #reader{}.</h4>
+
+<p>Creates reader cursor.</p>
+
+<h4>load_reader(term()) -> #reader{}.</h4>
+
+<p>Loads reader cursor from database.</p>
+
+<h4>save(#reader{}) -> #reader{}.</h4>
+
+<p>Flushes cursor to database.</p>
+
+<h4>top(#reader{}) -> #reader{}.</h4>
+
+<p>Moves cursor to top of the list.</p>
+
+<h4>bot(#reader{}) -> #reader{}.</h4>
+
+<p>Moves cursor to bottom of the list.</p>
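+
+<p>A reader usage sketch for the feed created in the writer example above:</p>
+
+<figure><code>
+ > #reader{id=Rid} = kvx:save(kvx:reader(Id)).
+ > kvx:bot(kvx:load_reader(Rid)).
+
+</code></figure>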
+
+    </section>
+    <section>
+
+<h3>ITER</h3>
+
+<figure><figcaption>KVS Stream Iterator</figcaption><code>
+  -record(iter,   { id    = [] :: [] | integer(),
+                    next  = [] :: [] | integer(),
+                    prev  = [] :: [] | integer() } ).
+
+</code></figure>
+
+<h4>next(#reader{}) -> #reader{}.</h4>
+
+<p>Moves the cursor to the next element, consuming data down from the top.
+   Returns an error if the list is empty, otherwise the next element or the last one.</p>
+
+<h4>prev(#reader{}) -> #reader{}.</h4>
+
+<p>Moves the cursor to the previous element, consuming data up from the bottom.
+   Returns an error if the list is empty, otherwise the previous element or the first one.</p>
+
+<h4>drop(#reader{}) -> #reader{}.</h4>
+
+<p>Drops N elements starting from the reader position.</p>
+
+<h4>take(#reader{}) -> #reader{}.</h4>
+
+<p>Tries to consume N records from the stream using its current position and direction.
+   Returns the consumed data. Usually you seek to some position and then consume some data.</p>
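+
+<p>A take sketch continuing the reader example; args sets how many records to consume,
+   and in the current implementations a negative value effectively consumes the whole feed:</p>
+
+<figure><code>
+ > kvx:take((kvx:load_reader(Rid))#reader{args=20,dir=0}).
+
+</code></figure>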
+
+    </section>
+    <section>
+
+<p>This module may refer to:
+<a href="kvs.htm"><b>kvs</b></a>,
+<a href="kvs_st.htm"><b>kvs_st</b></a>.
+</p>
+
+    </section>
+</main>
+
+<footer>
+    2005&mdash;2019 &copy; Synrc Research Center
+</footer>
+
+</body>
+</html>

+ 24 - 0
mix.exs

@@ -0,0 +1,24 @@
+defmodule KVS.Mixfile do
+  use Mix.Project
+
+  def project do
+    [app: :kvs, version: "6.4.0", description: "Abstract Chain Database", package: package(), deps: deps()]
+  end
+
+  def application do
+    [mod: {:kvs, []}, applications: [:rocksdb]]
+  end
+
+  defp package do
+    [
+      files: ~w(include man etc src LICENSE mix.exs README.md rebar.config sys.config),
+      licenses: ["MIT"],
+      links: %{"GitHub" => "https://github.com/synrc/kvx"}
+    ]
+  end
+
+  defp deps do
+    [{:ex_doc, ">= 0.0.0", only: :dev},
+     {:rocksdb, ">= 1.2.0"}]
+  end
+end

+ 2 - 0
rebar.config

@@ -0,0 +1,2 @@
+{erl_opts, [nowarn_unused_function,nowarn_duplicated_export]}.
+{deps, []}.

+ 7 - 0
src/kvs.app.src

@@ -0,0 +1,7 @@
+{application, kvs,
+   [{description, "KVS Abstract Chain Database"},
+    {vsn, "6.6"},
+    {registered, []},
+    {applications, [kernel,stdlib,rocksdb]},
+    {mod, { kvs, []}},
+    {env, []} ]}.

+ 132 - 0
src/kvs.erl

@@ -0,0 +1,132 @@
+-module(kvs).
+-behaviour(application).
+-behaviour(supervisor).
+-description('KVX Abstract Chain Store').
+-include_lib("stdlib/include/assert.hrl").
+-include("api.hrl").
+-include("metainfo.hrl").
+-include("stream.hrl").
+-include("cursors.hrl").
+-include("kvs.hrl").
+-include("backend.hrl").
+-export([dump/0,check/0,metainfo/0,ensure/1,seq_gen/0,fold/6,fold/7]).
+-export(?API).
+-export(?STREAM).
+-export([init/1, start/2, stop/1]).
+
+init([]) -> {ok, { {one_for_one, 5, 10}, []} }.
+start(_,_) -> supervisor:start_link({local, kvs}, kvs, []).
+stop(_) -> ok.
+
+% kvs api
+
+dba()              -> application:get_env(kvs,dba,kvs_mnesia).
+kvs_stream()       -> application:get_env(kvs,dba_st,kvs_stream).
+all(Table)         -> all     (Table, #kvs{mod=dba()}).
+delete(Table,Key)  -> delete  (Table, Key, #kvs{mod=dba()}).
+get(Table,Key)     -> ?MODULE:get     (Table, Key, #kvs{mod=dba()}).
+index(Table,K,V)   -> index   (Table, K,V, #kvs{mod=dba()}).
+join()             -> join    ([],    #kvs{mod=dba()}).
+dump()             -> dump    (#kvs{mod=dba()}).
+join(Node)         -> join    (Node,  #kvs{mod=dba()}).
+leave()            -> leave   (#kvs{mod=dba()}).
+count(Table)       -> count   (Table, #kvs{mod=dba()}).
+put(Record)        -> ?MODULE:put     (Record, #kvs{mod=dba()}).
+fold(Fun,Acc,T,S,C,D) -> fold (Fun,Acc,T,S,C,D, #kvs{mod=dba()}).
+stop()             -> stop_kvs(#kvs{mod=dba()}).
+start()            -> start   (#kvs{mod=dba()}).
+ver()              -> ver(#kvs{mod=dba()}).
+dir()              -> dir     (#kvs{mod=dba()}).
+seq(Table,DX)      -> seq     (Table, DX, #kvs{mod=dba()}).
+
+% stream api
+
+top  (X) -> (kvs_stream()):top (X).
+bot  (X) -> (kvs_stream()):bot (X).
+next (X) -> (kvs_stream()):next(X).
+prev (X) -> (kvs_stream()):prev(X).
+drop (X) -> (kvs_stream()):drop(X).
+take (X) -> (kvs_stream()):take(X).
+save (X) -> (kvs_stream()):save(X).
+cut  (X,Y) -> (kvs_stream()):cut (X,Y).
+add  (X) -> (kvs_stream()):add (X).
+append  (X, Y) -> (kvs_stream()):append (X, Y).
+load_reader (X) -> (kvs_stream()):load_reader(X).
+writer      (X) -> (kvs_stream()):writer(X).
+reader      (X) -> (kvs_stream()):reader(X).
+ensure(#writer{id=Id}) ->
+   case kvs:get(writer,Id) of
+        {error,_} -> kvs:save(kvs:writer(Id)), ok;
+        {ok,_}    -> ok end.
+
+metainfo() ->  #schema { name = kvs, tables = core() }.
+core()    -> [ #table { name = id_seq, fields = record_info(fields,id_seq), keys=[thing]} ].
+
+initialize(Backend, Module) ->
+    [ begin
+        Backend:create_table(T#table.name, [{attributes,T#table.fields},
+               {T#table.copy_type, [node()]},{type,T#table.type}]),
+        [ Backend:add_table_index(T#table.name, Key) || Key <- T#table.keys ],
+        T
+    end || T <- (Module:metainfo())#schema.tables ].
+
+all(Tab,#kvs{mod=DBA}) -> DBA:all(Tab).
+start(#kvs{mod=DBA}) -> DBA:start().
+stop_kvs(#kvs{mod=DBA}) -> DBA:stop().
+join(Node,#kvs{mod=DBA}) -> DBA:join(Node).
+leave(#kvs{mod=DBA}) -> DBA:leave().
+ver(#kvs{mod=DBA}) -> DBA:version().
+tables() -> lists:flatten([ (M:metainfo())#schema.tables || M <- modules() ]).
+table(Name) when is_atom(Name) -> lists:keyfind(Name,#table.name,tables());
+table(_) -> false.
+dir(#kvs{mod=DBA}) -> DBA:dir().
+modules() -> application:get_env(kvs,schema,[]).
+cursors() ->
+    lists:flatten([ [ {T#table.name,T#table.fields}
+        || #table{name=Name}=T <- (M:metainfo())#schema.tables, Name == reader orelse Name == writer  ]
+    || M <- modules() ]).
+
+fold(___,Acc,_,[],_,_,_) -> Acc;
+fold(___,Acc,_,undefined,_,_,_) -> Acc;
+fold(___,Acc,_,_,0,_,_) -> Acc;
+fold(Fun,Acc,Table,Start,Count,Direction,Driver) ->
+    try
+    case kvs:get(Table, Start, Driver) of
+         {ok, R} -> Prev = element(Direction, R),
+                    Count1 = case Count of C when is_integer(C) -> C - 1; _-> Count end,
+                    fold(Fun, Fun(R,Acc), Table, Prev, Count1, Direction, Driver);
+          _Error -> Acc
+    end catch _:_ -> Acc end.
+
+seq_gen() ->
+    Init = fun(Key) ->
+           case kvs:get(id_seq, Key) of
+                {error, _} -> {Key,kvs:put(#id_seq{thing = Key, id = 0})};
+                {ok, _} -> {Key,skip} end end,
+    [ Init(atom_to_list(Name))  || {Name,_Fields} <- cursors() ].
+
+
+put(Records,#kvs{mod=Mod}) when is_list(Records) -> Mod:put(Records);
+put(Record,#kvs{mod=Mod}) -> Mod:put(Record).
+get(RecordName, Key, #kvs{mod=Mod}) -> Mod:get(RecordName, Key).
+delete(Tab, Key, #kvs{mod=Mod}) -> Mod:delete(Tab, Key).
+count(Tab,#kvs{mod=DBA}) -> DBA:count(Tab).
+index(Tab, Key, Value,#kvs{mod=DBA}) -> DBA:index(Tab, Key, Value).
+seq(Tab, Incr,#kvs{mod=DBA}) -> DBA:seq(Tab, Incr).
+dump(#kvs{mod=Mod}) -> Mod:dump().
+
+% tests
+
+check() ->
+    Id  = {list,kvs:seq(writer,1)},
+    X   = 5,
+    _W   = kvs:save(kvs:writer(Id)),
+    #reader{id=R1} = kvs:save(kvs:reader(Id)),
+    #reader{id=R2} = kvs:save(kvs:reader(Id)),
+    [ kvs:save(kvs:add((kvs:writer(Id))#writer{args={emails,[],[],[],[]}})) || _ <- lists:seq(1,X) ],
+    Bot = kvs:bot(kvs:load_reader(R1)),
+    Top = kvs:top(kvs:load_reader(R2)),
+    #reader{args=F} = kvs:take(Bot#reader{args=20,dir=0}),
+    #reader{args=B} = kvs:take(Top#reader{args=20,dir=1}),
+    ?assertMatch(X,length(F)),
+    ?assertMatch(F,lists:reverse(B)).

+ 108 - 0
src/layers/kvs_stream.erl

@@ -0,0 +1,108 @@
+-module(kvs_stream).
+-description('KVX STREAM LAYER').
+-include("kvs.hrl").
+-include("stream.hrl").
+-include("metainfo.hrl").
+-export(?STREAM).
+-export([metainfo/0]).
+
+% boot for sample
+
+metainfo() -> #schema { name = kvs,    tables = tables() }.
+tables() -> [ #table  { name = writer, fields = record_info(fields, writer) },
+              #table  { name = reader, fields = record_info(fields, reader) } ].
+
+% section: kvs_stream prelude
+
+se(X,Y,Z)  -> setelement(X,Y,Z).
+e(X,Y)  -> element(X,Y).
+c4(R,V) -> se(#reader.args,  R, V).
+sn(M,T) -> se(#iter.next, M, T).
+sp(M,T) -> se(#iter.prev, M, T).
+si(M,T) -> se(#iter.id, M, T).
+tab(T)  -> e(1, T).
+id(T)   -> e(#iter.id, T).
+en(T)   -> e(#iter.next, T).
+ep(T)   -> e(#iter.prev, T).
+acc(0)  -> next;
+acc(1)  -> prev.
+
+% section: next, prev
+
+top(#reader{feed=F}=C) -> w(kvs:get(writer,F),top,C).
+bot(#reader{feed=F}=C) -> w(kvs:get(writer,F),bot,C).
+
+next(#reader{cache=[]}) -> {error,empty};
+next(#reader{cache={T,R},pos=P}=C) -> n(kvs:get(T,R),C,P+1).
+
+prev(#reader{cache=[]}) -> {error,empty};
+prev(#reader{cache={T,R},pos=P}=C) -> p(kvs:get(T,R),C,P-1).
+
+n({ok,R},C,P)    -> r(kvs:get(tab(R),en(R)),C,P);
+n({error,X},_,_) -> {error,X}.
+p({ok,R},C,P)    -> r(kvs:get(tab(R),ep(R)),C,P);
+p({error,X},_,_) -> {error,X}.
+r({ok,R},C,P)    -> C#reader{cache={tab(R),id(R)},pos=P};
+r({error,X},_,_) -> {error,X}.
+w({ok,#writer{first=[]}},bot,C)           -> C#reader{cache=[],pos=1};
+w({ok,#writer{first=B}},bot,C)            -> C#reader{cache={tab(B),id(B)},pos=1};
+w({ok,#writer{cache=B,count=Size}},top,C) -> C#reader{cache={tab(B),id(B)},pos=Size};
+w({error,X},_,_)                          -> {error,X}.
+
+% section: take, drop
+
+drop(#reader{cache=[]}=C) -> C#reader{args=[]};
+drop(#reader{dir=D,cache=B,args=N,pos=P}=C)  -> drop(acc(D),N,C,C,P,B).
+take(#reader{cache=[]}=C) -> C#reader{args=[]};
+take(#reader{dir=D,cache=B,args=N,pos=P}=C)  -> take(acc(D),N,C,C,[],P,B).
+
+take(_,_,{error,_},C2,R,P,B) -> C2#reader{args=lists:flatten(R),pos=P,cache=B};
+take(_,0,_,C2,R,P,B)         -> C2#reader{args=lists:flatten(R),pos=P,cache=B};
+take(A,N,#reader{cache={T,I},pos=P}=C,C2,R,_,_) ->
+    take(A,N-1,?MODULE:A(C),C2,[element(2,kvs:get(T,I))|R],P,{T,I}).
+
+drop(_,_,{error,_},C2,P,B)     -> C2#reader{pos=P,cache=B};
+drop(_,0,_,C2,P,B)             -> C2#reader{pos=P,cache=B};
+drop(A,N,#reader{cache=B,pos=P}=C,C2,_,_) ->
+    drop(A,N-1,?MODULE:A(C),C2,P,B).
+
+% new, save, load, up, down, top, bot
+
+load_reader (Id) -> case kvs:get(reader,Id) of {ok,C} -> C; _ -> #reader{id=[]} end.
+writer (Id) -> case kvs:get(writer,Id) of {ok,W} -> W; _ -> #writer{id=Id} end.
+reader (Id) -> case kvs:get(writer,Id) of
+         {ok,#writer{first=[]}} -> #reader{id=kvs:seq(reader,1),feed=Id,cache=[]};
+         {ok,#writer{first=F}}  -> #reader{id=kvs:seq(reader,1),feed=Id,cache={tab(F),id(F)}};
+         {error,_} -> kvs:save(#writer{id=Id}), reader(Id) end.
+save (C) -> NC = c4(C,[]), kvs:put(NC), NC.
+
+% add
+
+add(#writer{args=M}=C) when element(2,M) == [] -> add(si(M,kvs:seq(tab(M),1)),C);
+add(#writer{args=M}=C) -> add(M,C).
+
+add(M,#writer{cache=[]}=C) ->
+    _Id=id(M), N=sp(sn(M,[]),[]), kvs:put(N),
+    C#writer{cache=N,count=1,first=N};
+
+%add(M,#writer{cache=V,count=S}=C) ->
+%    N=sp(sn(M,[]),id(V)), P=sn(V,id(M)), kvs:put([N,P]),
+%    C#writer{cache=N,count=S+1}.
+
+add(M,#writer{cache=V1,count=S}=C) ->
+    {ok,V} = kvs:get(tab(V1),id(V1)),
+    N=sp(sn(M,[]),id(V)), P=sn(V,id(M)), kvs:put([N,P]),
+    C#writer{cache=N,count=S+1}.
+
+append(Rec,Feed) ->
+   kvs:ensure(#writer{id=Feed}),
+   Name = element(1,Rec),
+   Id = element(2,Rec),
+   case kvs:get(Name,Id) of
+        {ok,_}    -> Id;
+        {error,_} -> kvs:save(kvs:add((kvs:writer(Feed))#writer{args=Rec})), Id end.
+
+cut(_Feed,Id) ->
+   case kvs:get(writer,Id) of
+        {ok,#writer{count=N}} -> {ok,N};
+        {error,_} -> {error,not_found} end.

+ 84 - 0
src/stores/kvs_fs.erl

@@ -0,0 +1,84 @@
+-module(kvs_fs).
+-include("backend.hrl").
+-include("kvs.hrl").
+-include("metainfo.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+-export(?BACKEND).
+start()    -> ok.
+stop()     -> ok.
+destroy()  -> ok.
+version()  -> {version,"KVX FS"}.
+dir()      -> [ {table,F} || F <- filelib:wildcard("data/*"), filelib:is_dir(F) ].
+join(_Node) -> filelib:ensure_dir("data/"), initialize(). % should be rsync or smth
+initialize() ->
+    mnesia:create_schema([node()]),
+    [ kvs:initialize(kvs_fs,Module) || Module <- kvs:modules() ],
+    mnesia:wait_for_tables([ T#table.name || T <- kvs:tables()],infinity).
+
+index(_Tab,_Key,_Value) -> [].
+get(TableName, Key) ->
+    HashKey = encode(base64:encode(crypto:hash(sha, term_to_binary(Key)))),
+    Dir = lists:concat(["data/",TableName,"/"]),
+    case file:read_file(lists:concat([Dir,HashKey])) of
+         {ok,Binary} -> {ok,binary_to_term(Binary,[safe])};
+         {error,Reason} -> {error,Reason} end.
+put(Records) when is_list(Records) -> lists:map(fun(Record) -> put(Record) end, Records);
+put(Record) ->
+    TableName = element(1,Record),
+    HashKey = encode(base64:encode(crypto:hash(sha, term_to_binary(element(2,Record))))),
+    BinaryValue = term_to_binary(Record),
+    Dir = lists:concat(["data/",TableName,"/"]),
+    filelib:ensure_dir(Dir),
+    File = lists:concat([Dir,HashKey]),
+    file:write_file(File,BinaryValue,[write,raw,binary,sync]).
+
+delete(_Tab, _Key) -> case kvs:get(_Tab,_Key) of {ok,_} -> ok; {error,X} -> {error,X} end.
+count(RecordName) -> length(filelib:fold_files(lists:concat(["data/",RecordName]), "",true, fun(A,Acc)-> [A|Acc] end, [])).
+all(R) -> lists:flatten([ begin case file:read_file(File) of
+                        {ok,Binary} -> binary_to_term(Binary,[safe]);
+                        {error,_Reason} -> [] end end || File <-
+      filelib:fold_files(lists:concat(["data/",R]), "",true, fun(A,Acc)-> [A|Acc] end, []) ]).
+seq(RecordName, Incr) -> kvs_mnesia:seq(RecordName, Incr).
+create_table(Name,_Options) -> filelib:ensure_dir(lists:concat(["data/",Name,"/"])).
+add_table_index(_Record, _Field) -> ok.
+
+% URL ENCODE
+
+encode(B) when is_binary(B) -> encode(binary_to_list(B));
+encode([C | Cs]) when C >= $a, C =< $z -> [C | encode(Cs)];
+encode([C | Cs]) when C >= $A, C =< $Z -> [C | encode(Cs)];
+encode([C | Cs]) when C >= $0, C =< $9 -> [C | encode(Cs)];
+encode([C | Cs]) when C == 16#20 -> [$+ | encode(Cs)];
+
+% unreserved
+encode([C = $- | Cs]) -> [C | encode(Cs)];
+encode([C = $_ | Cs]) -> [C | encode(Cs)];
+encode([C = 46 | Cs]) -> [C | encode(Cs)];
+encode([C = $! | Cs]) -> [C | encode(Cs)];
+encode([C = $~ | Cs]) -> [C | encode(Cs)];
+encode([C = $* | Cs]) -> [C | encode(Cs)];
+encode([C = 39 | Cs]) -> [C | encode(Cs)];
+encode([C = $( | Cs]) -> [C | encode(Cs)];
+encode([C = $) | Cs]) -> [C | encode(Cs)];
+
+encode([C | Cs]) when C =< 16#7f -> escape_byte(C) ++ encode(Cs);
+encode([C | Cs]) when (C >= 16#7f) and (C =< 16#07FF) ->
+  escape_byte((C bsr 6) + 16#c0)
+  ++ escape_byte(C band 16#3f + 16#80)
+  ++ encode(Cs);
+encode([C | Cs]) when (C > 16#07FF) ->
+  escape_byte((C bsr 12) + 16#e0) % (0xe0 | C >> 12)
+  ++ escape_byte((16#3f band (C bsr 6)) + 16#80) % 0x80 | ((C >> 6) & 0x3f)
+  ++ escape_byte(C band 16#3f + 16#80) % 0x80 | (C >> 0x3f)
+  ++ encode(Cs);
+encode([C | Cs]) -> escape_byte(C) ++ encode(Cs);
+encode([]) -> [].
+
+hex_octet(N) when N =< 9 -> [$0 + N];
+hex_octet(N) when N > 15 -> hex_octet(N bsr 4) ++ hex_octet(N band 15);
+hex_octet(N) -> [N - 10 + $a].
+escape_byte(C) -> normalize(hex_octet(C)).
+normalize(H) when length(H) == 1 -> "%0" ++ H;
+normalize(H) -> "%" ++ H.
+
+dump() -> ok.

+ 90 - 0
src/stores/kvs_mnesia.erl

@@ -0,0 +1,90 @@
+-module(kvs_mnesia).
+-include("backend.hrl").
+-include("kvs.hrl").
+-include("metainfo.hrl").
+-include_lib("mnesia/src/mnesia.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+-export(?BACKEND).
+-export([info/1,exec/1,sync_indexes/0,sync_indexes/1,dump/1]).
+start()    -> mnesia:start().
+stop()     -> mnesia:stop().
+destroy()  -> [mnesia:delete_table(T)||{_,T}<-kvs:dir()], mnesia:delete_schema([node()]), ok.
+version()  -> {version,"KVX MNESIA"}.
+dir()      -> [{table,T}||T<-mnesia:system_info(local_tables)].
+join([])   -> mnesia:start(), mnesia:change_table_copy_type(schema, node(), disc_copies), initialize();
+join(Node) ->
+    mnesia:start(),
+    mnesia:change_config(extra_db_nodes, [Node]),
+    mnesia:change_table_copy_type(schema, node(), disc_copies),
+    [{Tb, mnesia:add_table_copy(Tb, node(), Type)}
+     || {Tb, [{N, Type}]} <- [{T, mnesia:table_info(T, where_to_commit)}
+                               || T <- mnesia:system_info(tables)], Node==N].
+
+initialize() ->
+    mnesia:create_schema([node()]),
+    Res = [ kvs:initialize(kvs_mnesia,Module) || Module <- kvs:modules() ],
+    mnesia:wait_for_tables([ T#table.name || T <- kvs:tables()],infinity),
+    Res.
+
+index(Tab,Key,Value) ->
+    Table = kvs:table(Tab),
+    Index = string:str(Table#table.fields,[Key]),
+    lists:flatten(many(fun() -> mnesia:index_read(Tab,Value,Index+1) end)).
+
+get(RecordName, Key) -> just_one(fun() -> mnesia:read(RecordName, Key) end).
+put(Records) when is_list(Records) -> void(fun() -> lists:foreach(fun mnesia:write/1, Records) end);
+put(Record) -> put([Record]).
+delete(Tab, Key) ->
+    case mnesia:activity(context(),fun()-> mnesia:delete({Tab, Key}) end) of
+        {aborted,Reason} -> {error,Reason};
+        {atomic,_Result} -> ok;
+        _ -> ok end.
+count(RecordName) -> mnesia:table_info(RecordName, size).
+all(R) -> lists:flatten(many(fun() -> L= mnesia:all_keys(R), [ mnesia:read({R, G}) || G <- L ] end)).
+seq([],[]) -> os:system_time();
+seq(RecordName, Incr) -> mnesia:dirty_update_counter({id_seq, RecordName}, Incr).
+many(Fun) -> case mnesia:activity(context(),Fun) of {atomic, R} -> R; {aborted, Error} -> {error, Error}; X -> X end.
+void(Fun) -> case mnesia:activity(context(),Fun) of {atomic, ok} -> ok; {aborted, Error} -> {error, Error}; X -> X end.
+info(T) -> try mnesia:table_info(T,all) catch _:_ -> [] end.
+create_table(Name,Options) -> mnesia:create_table(Name, Options).
+add_table_index(Record, Field) -> mnesia:add_table_index(Record, Field).
+exec(Q) -> F = fun() -> qlc:e(Q) end, {atomic, Val} = mnesia:activity(context(),F), Val.
+just_one(Fun) ->
+    case mnesia:activity(context(),Fun) of
+        {atomic, []} -> {error, not_found};
+        {atomic, R} -> {ok, R};
+        [] -> {error, not_found};
+        R when is_list(R) -> {ok,R};
+        Error -> Error end.
+
+%add(Record) -> mnesia:activity(context(),fun() -> kvs:append(Record,#kvs{mod=?MODULE}) end).
+context() -> application:get_env(kvs,mnesia_context,async_dirty).
+
+sync_indexes() ->
+    lists:map(fun sync_indexes/1, kvs:tables()).
+sync_indexes(#table{name = Table, keys = Keys}) ->
+    mnesia:wait_for_tables(Table, 10000),
+    #cstruct{attributes = Attrs} = mnesia:table_info(Table, cstruct),
+    Indexes = mnesia:table_info(Table, index),
+    IndexedKeys = [lists:nth(I-1, Attrs)|| I <- Indexes],
+    [mnesia:del_table_index(Table, Key) || Key <- IndexedKeys -- Keys],
+    [mnesia:add_table_index(Table, Key) || Key <- Keys -- IndexedKeys].
+
+dump() -> dump([ N || #table{name=N} <- kvs:tables() ]), ok.
+dump(short) ->
+    Gen = fun(T) ->
+        {S,M,C}=lists:unzip3([ dump_info(R) || R <- T ]),
+        {lists:usort(S),lists:sum(M),lists:sum(C)}
+    end,
+    dump_format([ {T,Gen(T)} || T <- [ N || #table{name=N} <- kvs:tables() ] ]);
+dump(Table) when is_atom(Table) -> dump(Table);
+dump(Tables) ->
+    dump_format([{T,dump_info(T)} || T <- lists:flatten(Tables) ]).
+dump_info(T) ->
+    {mnesia:table_info(T,storage_type),
+    mnesia:table_info(T,memory) * erlang:system_info(wordsize) / 1024 / 1024,
+    mnesia:table_info(T,size)}.
+dump_format(List) ->
+    io:format("~20s ~32s ~14s ~10s~n~n",["NAME","STORAGE TYPE","MEMORY (MB)","ELEMENTS"]),
+    [ io:format("~20s ~32w ~14.2f ~10b~n",[T,S,M,C]) || {T,{S,M,C}} <- List ],
+    io:format("~nSnapshot taken: ~p~n",[calendar:now_to_datetime(os:timestamp())]).

+ 55 - 0
src/stores/kvs_rocks.erl

@@ -0,0 +1,55 @@
+-module(kvs_rocks).
+-include("backend.hrl").
+-include("kvs.hrl").
+-include("metainfo.hrl").
+-include_lib("stdlib/include/qlc.hrl").
+-export(?BACKEND).
+-export([ref/0,next/8]).
+
+start()    -> ok.
+stop()     -> ok.
+destroy()  -> ok.
+version()  -> {version,"KVX ROCKSDB"}.
+dir()      -> [].
+leave() -> case ref() of [] -> skip; X -> rocksdb:close(X) end.
+join(_) -> application:start(rocksdb),
+           leave(), {ok, Ref} = rocksdb:open(application:get_env(kvs,rocks_name,"rocksdb"), [{create_if_missing, true}]),
+           initialize(),
+           application:set_env(kvs,rocks_ref,Ref).
+initialize() -> [ kvs:initialize(kvs_rocks,Module) || Module <- kvs:modules() ].
+ref() -> application:get_env(kvs,rocks_ref,[]).
+index(_,_,_) -> [].
+get(Tab, Key) ->
+    Address = <<(list_to_binary(lists:concat(["/",io_lib:format("~p",[Tab]),"/"])))/binary,(term_to_binary(Key))/binary>>,
+    case rocksdb:get(ref(), Address, []) of
+         not_found -> {error,not_found};
+         {ok,Bin} -> {ok,binary_to_term(Bin,[safe])} end.
+
+put(Records) when is_list(Records) -> lists:map(fun(Record) -> put(Record) end, Records);
+put(Record) -> rocksdb:put(ref(), <<(list_to_binary(lists:concat(["/",element(1,Record),"/"])))/binary,
+                                    (term_to_binary(element(2,Record)))/binary>>, term_to_binary(Record), [{sync,true}]).
+
+delete(Feed, Id) ->
+    Key    = list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed]),"/"])),
+    A      = <<Key/binary,(term_to_binary(Id))/binary>>,
+    rocksdb:delete(ref(), A, []).
+
+count(_) -> 0.
+all(R) -> {ok,I} = rocksdb:iterator(ref(), []),
+           Key = list_to_binary(lists:concat(["/",io_lib:format("~p",[R])])),
+           First = rocksdb:iterator_move(I, {seek,Key}),
+           lists:reverse(next(I,Key,size(Key),First,[],[],-1,0)).
+
+next(_,_,_,_,_,T,N,C) when C == N -> T;
+next(I,Key,S,{ok,A,X},_,T,N,C) -> next(I,Key,S,A,X,T,N,C);
+next(_,___,_,{error,_},_,T,_,_) -> T;
+next(I,Key,S,A,X,T,N,C) ->
+     case binary:part(A,0,S) of Key ->
+          next(I,Key,S,rocksdb:iterator_move(I, next), [],
+                       [binary_to_term(X,[safe])|T],N,C+1);
+                  _ -> T end.
+
+seq(_,_) -> integer_to_list(os:system_time()).
+create_table(_,_) -> [].
+add_table_index(_, _) -> ok.
+dump() -> ok.

+ 118 - 0
src/stores/kvs_st.erl

@@ -0,0 +1,118 @@
+-module(kvs_st).
+-description('KVX STREAM NATIVE ROCKS').
+-include("kvs.hrl").
+-include("stream.hrl").
+-include("metainfo.hrl").
+-export(?STREAM).
+
+ref() -> kvs_rocks:ref().
+
+% section: kvs_stream prelude
+
+se(X,Y,Z)  -> setelement(X,Y,Z).
+e(X,Y)  -> element(X,Y).
+c3(R,V) -> se(#reader.cache, R, V).
+c4(R,V) -> se(#reader.args,  R, V).
+si(M,T) -> se(#it.id, M, T).
+id(T)   -> e(#it.id, T).
+
+% section: next, prev
+
+top  (#reader{}=C) -> C.
+bot  (#reader{}=C) -> C.
+
+next (#reader{cache=[]}) -> {error,empty};
+next (#reader{cache=I}=C) ->
+   case rocksdb:iterator_move(I, next) of
+        {ok,_,Bin} -> C#reader{cache=binary_to_term(Bin,[safe])};
+            {error,Reason} -> {error,Reason} end.
+
+prev (#reader{cache=[]}) -> {error,empty};
+prev (#reader{cache=I}=C) ->
+   case rocksdb:iterator_move(I, prev) of
+        {ok,_,Bin} -> C#reader{cache=binary_to_term(Bin,[safe])};
+            {error,Reason} -> {error,Reason} end.
+
+% section: take, drop
+
+drop(#reader{args=N}) when N < 0 -> #reader{};
+
+drop(#reader{args=N,feed=Feed,cache=I}=C) when N == 0 ->
+   Key = list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed])])),
+   case rocksdb:iterator_move(I, {seek,Key}) of
+        {ok,_,Bin} -> C#reader{cache=binary_to_term(Bin,[safe])};
+                 _ -> C#reader{cache=[]} end;
+
+drop(#reader{args=N,feed=Feed,cache=I}=C) when N > 0 ->
+   Key   = list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed])])),
+   First = rocksdb:iterator_move(I, {seek,Key}),
+   Term  = lists:foldl(
+    fun (_,{{ok,K,_},{_,X}}) when N > X -> {K,{<<131,106>>,N}};
+        (_,{{ok,K,Bin},{A,X}}) when N =< X->
+           case binary:part(K,0,size(Key)) of
+                Key -> {rocksdb:iterator_move(I,next),{Bin,X+1}};
+                  _ -> {{error,range},{A,X}} end;
+        (_,{_,{_,_}}) -> {[],{<<131,106>>,N}}
+     end,
+           {First,{<<131,106>>,1}},
+           lists:seq(0,N)),
+   C#reader{cache=binary_to_term(element(1,element(2,Term)))}.
+
+take(#reader{args=N,feed=Feed,cache=I,dir=Dir}=C) ->
+   Key   = list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed])])),
+   First = rocksdb:iterator_move(I, {seek,Key}),
+   Res   = kvs_rocks:next(I,Key,size(Key),First,[],[],N,0),
+   C#reader{args= case Dir of 0 -> Res; 1 -> lists:reverse(Res) end}.
+
+% new, save, load, up, down, top, bot
+
+load_reader (Id) ->
+    case kvs:get(reader,Id) of
+         {ok,#reader{}=C} -> C#reader{cache=element(2,rocksdb:iterator(ref(),[]))};
+              _ -> #reader{id=[]} end.
+
+writer (Id) -> case kvs:get(writer,Id) of {ok,W} -> W; {error,_} -> #writer{id=Id} end.
+reader (Id) ->
+    case kvs:get(writer,Id) of
+         {ok,#writer{}} ->
+             {ok,I} = rocksdb:iterator(ref(), []),
+             #reader{id=kvs:seq([],[]),feed=Id,cache=I};
+         {error,_} -> #reader{} end.
+save (C) -> NC = c4(C,[]), N2 = c3(NC,[]), kvs:put(N2), N2.
+
+% add
+
+add(#writer{args=M}=C) when element(2,M) == [] -> add(si(M,kvs:seq([],[])),C);
+add(#writer{args=M}=C) -> add(M,C).
+
+add(M,#writer{id=Feed,count=S}=C) -> NS=S+1,
+    rocksdb:put(ref(),
+       <<(list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed]),"/"])))/binary,
+         (term_to_binary(id(M)))/binary>>, term_to_binary(M), [{sync,true}]),
+    C#writer{cache=M,count=NS}.
+
+append(Rec,Feed) ->
+   kvs:ensure(#writer{id=Feed}),
+   Id = element(2,Rec),
+   case kvs:get(Feed,Id) of
+        {ok,_}    -> Id;
+        {error,_} -> kvs:save(kvs:add((kvs:writer(Feed))#writer{args=Rec})), Id end.
+
+prev(_,_,_,_,_,_,N,C) when C == N -> C;
+prev(I,Key,S,{ok,A,X},_,T,N,C) -> prev(I,Key,S,A,X,T,N,C);
+prev(_,___,_,{error,_},_,_,_,C) -> C;
+prev(I,Key,S,A,_,_,N,C) when size(A) > S ->
+     case binary:part(A,0,S) of Key ->
+          rocksdb:delete(ref(), A, []),
+          Next = rocksdb:iterator_move(I, prev),
+          prev(I,Key, S, Next, [], A, N, C + 1);
+                                  _ -> C end;
+prev(_,_,_,_,_,_,_,C) -> C.
+
+cut(Feed,Id) ->
+    Key    = list_to_binary(lists:concat(["/",io_lib:format("~p",[Feed]),"/"])),
+    A      = <<Key/binary,(term_to_binary(Id))/binary>>,
+    {ok,I} = rocksdb:iterator(ref(), []),
+    case rocksdb:iterator_move(I, {seek,A}) of
+         {ok,A,X} -> {ok,prev(I,Key,size(Key),A,X,[],-1,0)};
+                _ -> {error,not_found} end.

+ 4 - 0
sys.config

@@ -0,0 +1,4 @@
+[ {kvs,[{dba,kvs_rocks},
+        {dba_st,kvs_st},
+        {schema,[kvs,kvs_stream]}]}
+].