// Integration tests for the es package. They require a running
// elasticsearch instance reachable at http://elasticsearch:9200.
package es
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
"github.com/olivere/elastic/v7"
|
|
log "github.com/sirupsen/logrus"
|
|
"github.com/spf13/viper"
|
|
"io"
|
|
"iwarma.ru/console/correlator/config"
|
|
"testing"
|
|
"time"
|
|
)
|
|
|
|
func prepareElastic() {
|
|
viper.Set(config.ElasticUrl, "http://elasticsearch:9200")
|
|
viper.Set(config.ElasticRetryCount, 1)
|
|
viper.Set(config.ElasticUsername, "elastic")
|
|
viper.Set(config.ElasticPassword, "changeme")
|
|
viper.Set(config.Verbose, true)
|
|
viper.Set(config.ElasticAggregatedIndexName, "test-aggregated")
|
|
viper.Set(config.ElasticNormalizedIndexName, "test-normalized")
|
|
viper.Set(config.AggregatorIterationDuration, time.Second*2)
|
|
viper.Set(config.Threads, 10)
|
|
}
|
|
|
|
// SetupTest prepares the environment for a single test by loading the
// elastic configuration into viper. The *testing.T parameter is accepted
// for symmetry with TearDownTest but is currently unused.
func SetupTest(t *testing.T) {
	prepareElastic()
}
|
|
func TearDownTest(t *testing.T) {
|
|
client, err := NewElastic()
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
err = clearIndex("*", client)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
del, err := client.client.DeleteIndex("*").Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
if !del.Acknowledged {
|
|
t.Errorf("Got bad response. Indexs, wasn't deleted")
|
|
}
|
|
}
|
|
|
|
func clearIndex(index string, el *Elastic) error {
|
|
exists, err := el.client.IndexExists(index).Do(el.ctx)
|
|
if err != nil {
|
|
log.Errorf("%+v", err)
|
|
return err
|
|
}
|
|
|
|
if exists {
|
|
deleted, err := el.client.DeleteIndex(index).Do(el.ctx)
|
|
if err != nil {
|
|
log.Errorf("%+v", err)
|
|
}
|
|
|
|
if !deleted.Acknowledged {
|
|
log.Errorf("Index %v wasn't deleted", index)
|
|
return fmt.Errorf("index %v wasn't deleted", index)
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// Check that we can connect to local elastic
|
|
func TestNewElastic(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
|
|
_, err := NewElastic()
|
|
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
}
|
|
|
|
// Check that we heve an error if we can't connect to elastic
|
|
func TestNewElasticError(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
viper.Set(config.ElasticUrl, "http://example.com")
|
|
|
|
_, err := NewElastic()
|
|
|
|
if err == nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
viper.Set(config.ElasticUrl, "http://elasticsearch:9200")
|
|
}
|
|
|
|
// Check that we can create index
|
|
func TestCheckAndCreateIndexNoIndex(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
|
|
client, err := NewElastic()
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
indexName := "my_test_index"
|
|
|
|
// Check that we don't have such index
|
|
exist, err := client.client.IndexExists(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
|
|
if exist {
|
|
t.Errorf("Index %v already exist", indexName)
|
|
return
|
|
}
|
|
|
|
err = client.CheckAndCreateIndex(indexName)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
mapp, err := client.client.GetMapping().Index(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
jsonMap, err := json.Marshal(mapp)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
mappingStr := "{\"my_test_index\":{\"mappings\":{\"properties\":{\"destination_ip\":{\"fields\":{\"keyword\":{\"type\":\"keyword\"}},\"type\":\"text\"},\"sign_name\":{\"fields\":{\"keyword\":{\"type\":\"keyword\"}},\"type\":\"text\"},\"source_ip\":{\"fields\":{\"keyword\":{\"type\":\"keyword\"}},\"type\":\"text\"}}}}}"
|
|
if string(jsonMap) != mappingStr {
|
|
t.Errorf("Bad index mapping. Expected %v . Have %v", mappingStr, string(jsonMap))
|
|
return
|
|
}
|
|
// Check that we have it now
|
|
exist, err = client.client.IndexExists(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
|
|
if !exist {
|
|
t.Errorf("Index %v wasn't created", indexName)
|
|
return
|
|
}
|
|
|
|
}
|
|
|
|
// Check that if we call CheckAndCreateIndex on existing index it isn't change
|
|
func TestCheckAndCreateIndexHaveIndex(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
|
|
client, err := NewElastic()
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
indexName := "my_test_index"
|
|
|
|
// Now we need to check if we already have such index
|
|
exist, err := client.client.IndexExists(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
if !exist {
|
|
ret, err := client.client.CreateIndex(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
if !ret.Acknowledged {
|
|
t.Errorf("Index %v wasn't created", indexName)
|
|
return
|
|
}
|
|
}
|
|
|
|
// Now, check what function do
|
|
err = client.CheckAndCreateIndex(indexName)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
}
|
|
|
|
// Check that index exist
|
|
exist, err = client.client.IndexExists(indexName).Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
if !exist {
|
|
t.Errorf("Index %v doesn't exist", indexName)
|
|
}
|
|
|
|
}
|
|
|
|
func TestElasticQuery(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
|
|
client, err := NewElastic()
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
indexName := "my_test_index"
|
|
|
|
err = clearIndex(indexName, client)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
err = client.CheckAndCreateIndex(indexName)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
// Add some documents
|
|
type testStruct struct {
|
|
Id int
|
|
Value string
|
|
}
|
|
|
|
N := 10
|
|
|
|
bulk := client.client.Bulk()
|
|
|
|
for i := 0; i < N; i++ {
|
|
id := fmt.Sprintf("%v", i)
|
|
bulk = bulk.Add(elastic.NewBulkIndexRequest().Index(indexName).Id(id).Doc(testStruct{Id: i, Value: id}))
|
|
}
|
|
|
|
bulkResponse, err := bulk.Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
if bulkResponse.Errors {
|
|
t.Errorf("%v", bulkResponse.Failed())
|
|
return
|
|
}
|
|
|
|
if len(bulkResponse.Indexed()) != N {
|
|
t.Errorf("Not all documents was added. Expect %v, got %v", N, len(bulkResponse.Indexed()))
|
|
return
|
|
}
|
|
|
|
// Need to wait until elastic is ready
|
|
time.Sleep(time.Second)
|
|
|
|
// Now, let's query them back
|
|
results, errs := client.Query(indexName, elastic.NewMatchAllQuery())
|
|
|
|
// Count results
|
|
resultCount := 0
|
|
for range results {
|
|
resultCount++
|
|
}
|
|
|
|
// Count errors
|
|
errorCount := 0
|
|
for err = range errs {
|
|
if err != nil {
|
|
log.Errorf("%v", err)
|
|
errorCount++
|
|
}
|
|
}
|
|
|
|
if resultCount != N {
|
|
t.Errorf("Got bad result count. Expect %v, got %v", N, resultCount)
|
|
}
|
|
|
|
if errorCount != 0 {
|
|
t.Errorf("Got some errors in scroll")
|
|
}
|
|
|
|
}
|
|
|
|
func TestElasticQueryScrollLimitParam(t *testing.T) {
|
|
SetupTest(t)
|
|
defer TearDownTest(t)
|
|
|
|
client, err := NewElastic()
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
indexName := "my_test_index"
|
|
|
|
err = clearIndex(indexName, client)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
err = client.CheckAndCreateIndex(indexName)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
// Add some documents
|
|
type testStruct struct {
|
|
Id int
|
|
Value string
|
|
}
|
|
|
|
N := 200
|
|
|
|
bulk := client.client.Bulk()
|
|
|
|
for i := 0; i < N; i++ {
|
|
id := fmt.Sprintf("%v", i)
|
|
bulk = bulk.Add(elastic.NewBulkIndexRequest().Index(indexName).Id(id).Doc(testStruct{Id: i, Value: id}))
|
|
}
|
|
|
|
bulkResponse, err := bulk.Do(client.ctx)
|
|
if err != nil {
|
|
t.Errorf("%v", err)
|
|
return
|
|
}
|
|
|
|
if bulkResponse.Errors {
|
|
t.Errorf("%v", bulkResponse.Failed())
|
|
return
|
|
}
|
|
|
|
if len(bulkResponse.Indexed()) != N {
|
|
t.Errorf("Not all documents was added. Expect %v, got %v", N, len(bulkResponse.Indexed()))
|
|
return
|
|
}
|
|
|
|
// Need to wait until elastic is ready
|
|
time.Sleep(time.Second)
|
|
|
|
// Prepare config
|
|
viper.Set(config.ScrollSize, 10)
|
|
// Get documents from query
|
|
query := elastic.NewMatchAllQuery()
|
|
hits := make(chan *elastic.SearchHit)
|
|
errs := make(chan error, 1)
|
|
scrollSize := viper.GetInt(config.ScrollSize)
|
|
go func() {
|
|
defer close(hits)
|
|
defer close(errs)
|
|
scroll := client.client.Scroll(indexName).Query(query)
|
|
scroll.Size(scrollSize)
|
|
|
|
for {
|
|
res, err := scroll.Do(client.ctx)
|
|
if err == io.EOF {
|
|
break
|
|
}
|
|
|
|
if len(res.Hits.Hits) != 10 {
|
|
t.Errorf("Scroll size is not 10")
|
|
return
|
|
}
|
|
|
|
if err != nil {
|
|
client.log.Errorf("Got error from scroll: %v", err)
|
|
errs <- err
|
|
break
|
|
}
|
|
|
|
for _, hit := range res.Hits.Hits {
|
|
select {
|
|
case hits <- hit:
|
|
case <-client.ctx.Done():
|
|
{
|
|
errs <- client.ctx.Err()
|
|
break
|
|
}
|
|
}
|
|
}
|
|
}
|
|
}()
|
|
|
|
}
|