// 'DocumentStore' is the main entry point for the client API.
// It is responsible for managing and establishing connections
// between your application and the RavenDB server/cluster
// and is capable of working with multiple databases at once.
// Due to its nature, it is recommended to have only one
// singleton instance per application.
public static class DocumentStoreHolder
{
    // Thread-safe, lazily-initialized singleton: Lazy<T> guarantees the store
    // is created exactly once, on first access to Store.
    private static readonly Lazy<IDocumentStore> LazyStore =
        new Lazy<IDocumentStore>(CreateStore);

    // The application-wide document store instance.
    public static IDocumentStore Store => LazyStore.Value;

    // Builds and initializes a store pointed at the local server's
    // 'Northwind' database.
    private static IDocumentStore CreateStore()
    {
        var store = new DocumentStore
        {
            Urls = new[] { "http://localhost:8080" },
            Database = "Northwind"
        };
        return store.Initialize();
    }
}
// 'DocumentSession' or 'Session' is a short-lived and lightweight object
// used as the main and recommended interaction point with the database.
// With 'Session' you are able to load documents, modify them, save them back,
// issue queries, patches, and much more.
// It implements the Unit of Work concept and batches requests
// to reduce expensive network traffic.
// The session tracks every entity loaded or stored through it and sends all
// pending changes to the server in a single SaveChanges() batch.
using (IDocumentSession session = DocumentStoreHolder.Store.OpenSession())
{
// code here
}
var category = new Category
{
Name = "Electronics"
};
// Store the category in 'Session'
// and automatically assign Id using HiLo algorithm
session.Store(category);
var product = new Product
{
Name = "Laptop 2000",
Category = category.Id // use the previously assigned Id
};
// Store the product in 'Session'
// and automatically assign Id using HiLo algorithm
session.Store(product);
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
session.SaveChanges();
// Load the 'Employee' and start tracking its changes
Employee employee = session.Load<Employee>("employees/1-A");
// Apply modifications
employee.FirstName = "Juliette";
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
session.SaveChanges();
// return all entities from 'Employees' collection
// where FirstName equals 'Robert'
List<Employee> employees = session
.Query<Employee>()
.Where(x => x.FirstName == "Robert")
.ToList();
// return all 'LastNames' from 'Employees' collection
// for employees born after '1960-01-01'
List<string> lastNames = session
.Query<Employee>()
.Where(x => x.Birthday > new DateTime(1960, 1, 1))
.Select(x => x.LastName)
.ToList();
// 'DocumentStore' is the main entry point for the client API.
// It is responsible for managing and establishing connections
// between your application and the RavenDB server/cluster
// and is capable of working with multiple databases at once.
// Due to its nature, it is recommended to have only one
// singleton instance per application.
public class DocumentStoreHolder {

    // Initialization-on-demand holder idiom: the JVM guarantees the nested
    // class (and therefore the store) is initialized lazily and thread-safely
    // the first time getStore() is called.
    private static class Holder {
        static final IDocumentStore STORE;

        static {
            STORE = new DocumentStore("http://localhost:8080", "Northwind");
            STORE.initialize();
        }
    }

    // Returns the application-wide document store singleton.
    public static IDocumentStore getStore() {
        return Holder.STORE;
    }
}
// 'DocumentSession' or 'Session' is a short-lived and lightweight object
// used as the main and recommended interaction point with the database.
// With 'Session' you are able to load documents, modify them, save them back,
// issue queries, patches, and much more.
// It implements the Unit of Work concept and batches requests
// to reduce expensive network traffic.
// The session is closed automatically at the end of the try-with-resources
// block. (Fixed: the original had a stray nested brace pair and never closed
// the try block, leaving the snippet with unbalanced braces.)
try (IDocumentSession session = DocumentStoreHolder.getStore().openSession()) {
    // code here
}
// The session tracks every entity loaded or stored through it and sends all
// pending changes to the server in a single saveChanges() batch.
Category category = new Category();
category.setName("Electronics");
// Store the category in 'Session'
// and automatically assign Id using HiLo algorithm
session.store(category);
Product product = new Product();
product.setName("Laptop 2000");
product.setCategory(category.getId()); // use the previously assigned Id
// Store the product in 'Session'
// and automatically assign Id using HiLo algorithm
session.store(product);
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
session.saveChanges();
// Load the 'Employee' and start tracking its changes
Employee employee = session.load(Employee.class, "employees/1-A");
// Apply modifications
employee.setFirstName("Juliette");
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
session.saveChanges();
// return all entities from 'Employees' collection
// where firstName equals 'Robert'
List<Employee> employees = session.query(Employee.class)
.whereEquals("firstName", "Robert")
.toList();
// return all 'lastNames' from 'Employees' collection
// for employees over the age of 18
List<String> lastNames = session.query(Employee.class)
.whereGreaterThanOrEqual("age", 18)
.selectFields(String.class, "lastName")
.toList();
// 'DocumentStore' is the main entry point for the client API.
// It is responsible for managing and establishing connections
// between your application and the RavenDB server/cluster
// and is capable of working with multiple databases at once.
// Due to its nature, it is recommended to have only one
// singleton instance per application.
import { DocumentStore } from "ravendb";
// Create the singleton store once and reuse it for the application's lifetime.
const documentStore = new DocumentStore("http://localhost:8080", "Northwind");
documentStore.initialize();
// 'DocumentSession' or 'Session' is a short-lived and lightweight object
// used as the main and recommended interaction point with the database.
// With 'Session' you are able to load documents, modify them, save them back,
// issue queries, patches, and much more.
// It implements the Unit of Work concept and batches requests
// to reduce expensive network traffic.
// The session tracks every entity loaded or stored through it and sends all
// pending changes to the server in a single saveChanges() batch.
const session = documentStore.openSession();
// code here
const category = new Category();
category.name = "Electronics";
// Store the category in 'Session'
// and automatically assign Id using HiLo algorithm
await session.store(category);
const product = new Product();
product.name = "Laptop 2000";
product.category = category.id; // use the previously assigned Id
// Store the product in 'Session'
// and automatically assign Id using HiLo algorithm
await session.store(product);
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
await session.saveChanges();
// Load the 'Employee' and start tracking its changes
const employee = await session.load("employees/1-A");
// Apply modifications
employee.firstName = "Juliette";
// Synchronize changes with the server.
// All changes will be sent in one batch
// that will be processed as _one_ ACID transaction
await session.saveChanges();
// return all entities from 'Employees' collection
// where firstName equals 'Robert'
const employees = await session.query({ collection: "Employees" })
.whereEquals("firstName", "Robert")
.all();
// return all 'lastNames' from 'Employees' collection
// for employees over the age of 18
const lastNames = await session.query({ collection: "Employees" })
.whereGreaterThanOrEqual("age", 18)
.selectFields("lastName")
.all();
Countless systems, including an ever-growing number of IoT (Internet of Things) devices, produce streams of values that can show the behavior and development of a process over time. The barometer measurements produced by a weather station, the coordinates produced by a delivery truck’s GPS, share prices reported by a stock exchange, and the heartrate of a runner, reported by a wearable pulse tracker, are values that can be collected and put to very good use. A time-series is a sequence of data points in which collected values are ordered by time, to ease their management and usage.
RavenDB 5.0’s time-series support includes a highly-efficient time-series storage, a comfortable and thorough set of API methods, the support of core features like indexing and querying and of dedicated time-series features like rollup and retention, and the full support of a management Studio that lets you conveniently watch and manage time-series, play with them, query and plot them in graphs.
Features:
RavenDB is a distributed document database, and its time-series management reflects it and is empowered by it.
// Open a session
using (var session = store.OpenSession())
{
    // Use the session to create a document
    session.Store(new { Name = "John" }, "users/john");

    // Append a Heartrate entry at the first-minute timestamp.
    // Fixed: the series name was misspelled "Hearthrate", which would have
    // created a different series than the "Heartrate" one read back later;
    // the "watches/fitbit" tag typo ("fibit") is corrected as well.
    session.TimeSeriesFor("users/john", "Heartrate")
        .Append(baseline.AddMinutes(1), 70d, "watches/fitbit");

    session.SaveChanges();
}
// Retrieve all entries of the "Heartrate" time-series
using (var session = store.OpenSession())
{
// Passing DateTime.MinValue/MaxValue retrieves the series' full range of entries
IEnumerable<TimeSeriesEntry> val = session.TimeSeriesFor("users/john", "Heartrate")
.Get(DateTime.MinValue, DateTime.MaxValue);
}
Time-series are segmented, to speed up load and query time by handling only the segments that contain relevant data.
A time-series entry is composed of:
Focus on doing queries, while RavenDB creates the indexes for you.
Auto Indexes are designed when there is no need to customize the capabilities extensively.
With Auto Indexes you will be able to perform queries, including advanced ones like full-text search.
RavenDB supports following auto index types:
RavenDB learns the behavior of your application to merge auto indexes to reduce your overhead. It will schedule unused auto indexes for deletion.
Harness the full potential of indexing.
With Static Indexes, your indexing capabilities are almost endless. Use C# LINQ or JavaScript functions to shape the indexation to address your needs by defining single or multiple mapping functions and reduce (aggregate) the results if necessary.
RavenDB supports the following index types:
Each Static Index allows you to configure the behavior of each indexed field with the following options:
You can override server or database settings regarding indexing on a per index basis or add your own custom C# or JavaScript code to be used during indexing.
Attachment content and metadata can now be indexed using static indexes.
Attachment content and metadata can now be indexed using static indexes.
An attachment is a kind of data that is associated with some document, but either it can't be expressed as JSON, or we’d prefer to load and modify it separately from the document itself. Examples of attachments might be images, audio, or just pure binary data.
The attachment content can be loaded to the index as a Stream or as a string. Besides that, the following metadata can be accessed: the attachment name, its size, its hash, and its content type.
Import libraries from NuGet and other sources into your indexes.
The new Additional Assemblies feature allows you to import libraries from NuGet and other sources into your indexes so that they can be used within the index logic itself. This makes it possible to integrate a variety of existing technologies such as:
Besides NuGet, Additional Assemblies also allows you to import libraries from runtime or from a local folder.
Take a peek at what is going on internally.
As a rule, RavenDB gives you as much valuable debugging information as possible.
We created an indexing performance graph, to see indexing internals, particular steps in the process, their timings, and the amount of input and output documents. RavenDB makes indexing as transparent as possible to help you make the best analysis and take the optimal next steps.
The Map-Reduce visualizer will take you into the deepest depths of the map-reduce process itself. You can understand what mapping results your documents are yielding and follow the reduction steps that take place before you reach the final result.
Count fast and easy in a distributed manner
Distributed Counters are numeric data variables that can be added to your documents. They are designed for scenarios like:
using (var session = store.OpenSession())
{
// increment counter 'likes' of document 'users/1-A' by 10
session.CountersFor("users/1-A").Increment("likes", 10);
// counter modifications are batched with the session's other pending changes
session.SaveChanges();
}
Manage the History of Changes to Your Documents.
Track the history of changes for a document. Compare each revision, or revert to a specific one.
You can revise your entire database to a specific point in time in a single click. You can also revise a specific collection or collections.
Define document expiration dates and schedule cleanup intervals.
Link binary data to your documents with ease.
Retrieve documents in a handy and reliable way for processing.
Perform persistent live queries with high availability in mind.
In order to use a DataSubscription, it should be defined first:
// Create the subscription definition; it is stored cluster-wide so any node
// can serve workers that connect to it.
store.Subscriptions.Create(new Raven.Client.Documents.Subscriptions.SubscriptionCreationOptions
{
Name = "Heavy Orders Tracking",
Query = @"
from Orders as o
where o.Freight > 50
select
{
Freight: o.Freight,
ShipTo: o.ShipTo,
Employee: o.Employee,
Products: o.Lines.map(x=>x.Product)
}"
});
The subscription definition is being sent to the server and stored in the cluster.
The SubscriptionWorker allows processing data on demand, and continue from the point it last stopped:
The worker will continue running until stopped by user or processing error.
// Run the worker; the returned task completes only when the worker is
// stopped by the user or fails with a processing error.
var workerTask = worker.Run(async x => {
foreach (var orderSummary in x.Items)
{
await NotifyLogisticsDepartmentAboutHeavyShipment(orderSummary.Result);
}
});
With the massive amounts of information that modern applications are expected to handle, it’s important to be able to compress data into smaller sizes to save storage space. In most databases, this presents a tradeoff - the more efficiently data is compressed, the slower it becomes to store, retrieve, or modify it. RavenDB 5.0 introduces a new way of compressing documents more efficiently, without sacrificing the high performance and speed you’ve come to expect.
In many use cases, the data stored in RavenDB consists of many documents with similar structures or other commonalities. A lot of space could be saved by compressing collections of similar documents as a single unit. But this would mean that to load a single document, the entire collection would have to be decompressed. To add, delete, or change a single document, the entire collection would have to be compressed again in a different way. Past versions of RavenDB offered ways of compressing the data within documents, but we found that compressing multiple documents simply wasn’t worth the performance cost.
In RavenDB 5.0 we have introduced a solution to this dilemma by integrating the Zstd compression algorithm (Zstandard) first developed for Facebook. This algorithm is able to ‘train’ on a batch of documents and learn the commonalities between them, and then compress new documents individually, which makes it possible to decompress them individually as well.
This new Documents Compression feature can be toggled for each document collection, as well as document revisions. By training on the first few documents in the collection, the Zstd algorithm generates a dictionary that it then applies to every new document. The algorithm monitors the compression ratio achieved for each document, and if it is not satisfactory, the algorithm goes back and re-trains on the most recently modified documents. After this, if the algorithm is able to compress the new document more efficiently, the dictionary is updated. In this way, the algorithm continuously adapts to the data you feed it.
Using this new feature, we often see compression ratios of over 50%, with negligible costs to performance. In fact, because of the reduced I/O usage, we often see an overall increase in the speed the system is able to process and handle operations.
Expand beyond one node in a matter of minutes. Achieve high availability, load balancing, and failover with just a few clicks.
With RavenDB clustering is easy!
In a matter of minutes, you can configure it without any expert knowledge using our GUI management studio.
The RavenDB clustering is built on top of two layers:
First, the cluster layer is managed by a consensus protocol called Raft. In CAP theorem it is CP (consistent and partition tolerant).
The second layer, the database layer, is AP (it is always available, even if there is a partition, and it's eventually consistent) and is handled by a gossip protocol between the databases on different nodes, forming multi-master mesh and replicating data between each other.
RavenDB utilizes the different layers for different purposes. At the cluster layer, the consensus protocol ensures that operators have the peace of mind of knowing that their commands are accepted and followed. At the database layer you know that RavenDB will never lose writes and will always keep your data safe.
Schedule tasks among the cluster and let the other nodes take over the work if needed.
Scheduling a backup, an ETL job, or any other ongoing process on just one node is not acceptable. If that node goes down for any reason, the show must go on. That is why RavenDB introduced cluster-wide tasks, assigned to a node, but owned by the cluster.
In the case of a failure of the assigned node, the RavenDB cluster will re-assign the work to another node, ensuring continuity of operations.
The following tasks can be created:
With cluster-wide tasks or as we are calling them Ongoing Tasks, you have a guarantee that only one cluster node is processing that job. With the high availability in place, you have assurance that the task will automatically switch to a new node if the old one is down.
Ensure your data is safe and sound, residing on a given number of nodes.
A cluster can assign a database to all the nodes in the cluster, or to just some of them. In a five node cluster, most databases will only reside on two or three of the nodes, since duplicating all information times five is usually excessive.
Instead, you'll typically spread the databases with a replication factor of two or three on the various nodes. If a node goes down (hardware failure, for example), the cluster will note that and if it fails to come back up quickly enough, will take steps to ensure that the number of live active replicas of the database is maintained.
The cluster will do that by adding another node in the cluster for the database, resulting in another copy of the data and ensuring that the configured number of replicas is maintained. When the failed node is brought up again, the cluster will determine whether to keep the data on the old node or to use the new topology. Your operations team doesn't have to be on their toes at all times; RavenDB is constantly monitoring and acting on your behalf within the boundaries set by your administrators.
Trust our experience and sleep tight.
Running a production database cluster can be a daunting task. There are a lot of knobs to turn, especially when you have co-dependent settings and options.
Far from being an opaque system, RavenDB works very hard to externalize and make visible all the details about the system that your operations team needs, and is often deployed in a self-managing configuration.
Features such as automatic failover, dynamic distribution of tasks and databases, multi master writes means that your operations team can sleep in peace, knowing that if anything happens, they can get to it in the morning.
RavenDB also continuously runs diagnostics and self checks. Each member of the cluster is verifying each other to ensure the health of the entire system. If anything troubling shows up, RavenDB will alert the administrator, and usually include the suggested fix.
Tested in production for the past decade, RavenDB is a mature product requiring minimal guidance on day to day issues.
Extend the transactionality beyond a single node.
Combine documents with Compare Exchange values in a single transaction to favor consistency over availability and ensure that changes are going to be applied in an identical manner across the cluster even in the presence of failures and network partitions.
No major code change is required, simply open the session with the TransactionMode
set to ClusterWide
and you have a cluster-wide transaction.
using (var session = store.OpenSession(new SessionOptions
{
    TransactionMode = TransactionMode.ClusterWide
}))
{
    var user = new User
    {
        FirstName = "John",
        LastName = "Doe",
        Email = "johndoe@ravendb.net" // fixed: missing '=' in object initializer
    };

    // Create cluster-wide unique users per email: the compare-exchange value
    // reserves the email key across the whole cluster, so the transaction
    // fails if another node already registered the same address.
    session.Store(user);
    session.Advanced.ClusterTransaction.CreateCompareExchangeValue(user.Email, user.Id);
    session.SaveChanges();
}
Replicate your data to an external node or connect clusters together.
Do you need to have offsite replica of your data?
Have you decided to connect geo-distributed clusters together?
Need to run resource intensive calculations on a separated node?
External Replication is a master-slave replication that continuously pushes data to the destination node in an asynchronous manner whenever the data changes on the source node.
Synchronize with the master server on-demand.
Does your solution involve many instances requiring data synchronization with the main server?
Pull Replication will provide a secured master-slave replication initiated by the slave side to reduce the networking configuration fatigue needed.
For this purpose, in order to deploy slave nodes, only a single definition on the master server is required, allowing you to connect numerous nodes in a matter of minutes without additional overhead.
Replicate selected documents. Control databases read and write access to documents
Want to replicate documents, but not an entire database?
Do you have sensitive data that you need to limit access to?
Replication now allows you to configure both hub servers and sink servers to send and accept only certain documents. In other words, you can control which databases have read and write access to which documents. These documents can be specified not just by their document ID, but also all document IDs that begin with some prefix.
Imagine your application stores information for a hospital, and it has a central database and many other databases with limited access. Filtered Replication allows you to create a database for Dr. Alice Smith that has read access to this document in the central database:
doctors/Smith_A
And all documents in the central database with IDs that have this prefix:
pharmacy/medicines/
In addition, Dr. Smith’s database might have write access for all documents with this prefix:
prescriptions/Smith_A/
This feature allows you to plan far in advance which data will be read, replicated, and modified by whom.
Manage authentication and authorization via X.509 certificates. Secure your connectivity with SSL and TLS 1.2.
The safety of your data is always our top priority. We strive to make sure that anything your users share with you on the assumption of privacy will remain private. This does not only mean protecting your data on the disk drive (read more about this in Encryption feature), but also in transferring it securely over the wire. To achieve this, we introduced the ability to utilize the HTTPS with TLS 1.2 protocols using X.509 certificates which grants you enterprise-level security for your data during transfers over the network.
The certificates are also used to grant specific privileges to certificate holders allowing them to access only subsets of all databases on the server, or to execute operations that are allowed to one of the predefined roles (security clearances). All of this is configurable in a convenient way using our Management Studio GUI.
Keep your data protected.
Our encryption techniques are among the best on the market.
RavenDB comes with built-in encryption support that utilizes the modern XChaCha20-Poly1305 algorithm from a well-known and battle-tested encryption library called libsodium giving you the best of both worlds: security and performance in one go.
Store your backup safely
Monitor what is going on in your Server in the blink of an eye using our built-in Server Dashboard.
The database server should not be a mystery. We invested a lot of effort to expose as much valuable information as possible. We created a Server Dashboard, part of your free Community License, to enable you to monitor in real time crucial aggregate information like CPU usage, memory consumption, state of the databases, server traffic, storage information and much more. Now it’s easier for you to gauge the behavior of your server, and take the necessary actions if needed.
Connect Zabbix or your favorite monitoring tool via our built-in SNMP support.
When you have dozens of servers to maintain, you can hook up one tool to monitor them all. RavenDB comes with built-in support for SNMPv2 and SNMPv3, and exposes over 50 unique OIDs for you to take advantage of.
Turn on logging on-the-fly, and view them in the Studio. No need to restart the Server!
Schedule automatic backups to different external destinations.
RavenDB enables you to schedule full and/or incremental automatic backups, choose if those backups should be binary (slower creation, quicker restore) or JSON (quicker creation, slower restore), and choose at least one of the following destinations:
Use our built-in console commands to control your Server.
Issue JavaScript commands to modify your Server or database configuration on-the-fly without the need of any restarts.