Browse Source

Initial commit for Cassandra storage engine.

pull/57/head
Sergey Petrunya 13 years ago
parent
commit
c15914f761
  1. 10
      mysql-test/include/have_cassandra.inc
  2. 1
      mysql-test/include/have_cassandra.opt
  3. 20
      mysql-test/r/cassandra.result
  4. 68
      mysql-test/t/cassandra.test
  5. 22
      storage/cassandra/CMakeLists.txt
  6. 271
      storage/cassandra/cassandra_se.cc
  7. 57
      storage/cassandra/cassandra_se.h
  8. 12871
      storage/cassandra/gen-cpp/Cassandra.cpp
  9. 5466
      storage/cassandra/gen-cpp/Cassandra.h
  10. 219
      storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp
  11. 18
      storage/cassandra/gen-cpp/cassandra_constants.cpp
  12. 26
      storage/cassandra/gen-cpp/cassandra_constants.h
  13. 3512
      storage/cassandra/gen-cpp/cassandra_types.cpp
  14. 2149
      storage/cassandra/gen-cpp/cassandra_types.h
  15. 727
      storage/cassandra/ha_cassandra.cc
  16. 218
      storage/cassandra/ha_cassandra.h

10
mysql-test/include/have_cassandra.inc

@@ -0,0 +1,10 @@
#
# suite.pm will make sure that all tests including this file
# will be skipped unless the Cassandra engine is enabled
# (the original comment said "innodb or xtradb" — copy/paste artifact)
#
# The check below is redundant with suite.pm, but keeps the test
# self-contained: skip unless ENGINES reports cassandra as usable.
if (`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'cassandra' AND support IN ('YES', 'DEFAULT', 'ENABLED')`)
{
--skip Test requires Cassandra.
}

1
mysql-test/include/have_cassandra.opt

@@ -0,0 +1 @@
--cassandra=on

20
mysql-test/r/cassandra.result

@@ -0,0 +1,20 @@
drop table if exists t0, t1;
create table t1 (a int) engine=cassandra
thrift_host='localhost' keyspace='foo' column_family='colfam';
ERROR 42000: Incorrect column name 'First column must be named 'rowkey''
create table t1 (a int primary key, b int) engine=cassandra
thrift_host='localhost' keyspace='foo' column_family='colfam';
ERROR 42000: Incorrect column name 'First column must be named 'rowkey''
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='127.0.0.2' keyspace='foo' column_family='colfam';
ERROR HY000: Unable to connect to foreign data source: connect() failed: Connection refused [1]
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='localhost' keyspace='no_such_keyspace' column_family='colfam';
ERROR HY000: Unable to connect to foreign data source: Default TException. [Keyspace no_such_keyspace does not exist]
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='localhost' keyspace='no_such_keyspace';
ERROR HY000: Can't create table 'test.t1' (errno: 140)
create table t1 (rowkey char(36) primary key, column1 char(60)) engine=cassandra
thrift_host='localhost' keyspace='mariadbtest' column_family='cf1';
insert into t1 values ('key0', 'data1');
drop table t1;

68
mysql-test/t/cassandra.test

@@ -0,0 +1,68 @@
#
# Tests for cassandra storage engine
#
--source include/have_cassandra.inc
--disable_warnings
drop table if exists t0, t1;
--enable_warnings
# Test various errors on table creation.
# First column must be named 'rowkey' (it maps to the Cassandra row key).
--error ER_WRONG_COLUMN_NAME
create table t1 (a int) engine=cassandra
thrift_host='localhost' keyspace='foo' column_family='colfam';
--error ER_WRONG_COLUMN_NAME
create table t1 (a int primary key, b int) engine=cassandra
thrift_host='localhost' keyspace='foo' column_family='colfam';
# Nothing listens on 127.0.0.2 => connection refused.
--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='127.0.0.2' keyspace='foo' column_family='colfam';
# Keyspace does not exist on the server.
--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='localhost' keyspace='no_such_keyspace' column_family='colfam';
# No column family specified
--error ER_CANT_CREATE_TABLE
create table t1 (rowkey char(10) primary key, column1 char(10)) engine=cassandra
thrift_host='localhost' keyspace='no_such_keyspace';
############################################################################
## Cassandra initialization:
############################################################################
# The commands below are NOT executed by mysqltest (--disable_parsing);
# they document the manual cqlsh setup required before running this test.
--disable_parsing
./cqlsh --cql3
CREATE KEYSPACE mariadbtest
WITH strategy_class = 'org.apache.cassandra.locator.SimpleStrategy'
AND strategy_options:replication_factor='1';
USE mariadbtest;
create columnfamily cf1 ( pk varchar primary key, data1 varchar);
--enable_parsing
############################################################################
## Cassandra initialization ends
############################################################################
# Now, create a table for real and insert data
create table t1 (rowkey char(36) primary key, column1 char(60)) engine=cassandra
thrift_host='localhost' keyspace='mariadbtest' column_family='cf1';
insert into t1 values ('key0', 'data1');
drop table t1;
############################################################################
## Cassandra cleanup
############################################################################
# Manual cleanup (not executed), matching the initialization above.
--disable_parsing
drop columnfamily cf1;
--enable_parsing
############################################################################
## Cassandra cleanup ends
############################################################################

22
storage/cassandra/CMakeLists.txt

@@ -0,0 +1,22 @@
# Sources of the Cassandra storage engine plugin (incl. Thrift-generated code).
SET(cassandra_sources
  ha_cassandra.cc
  ha_cassandra.h
  cassandra_se.h
  cassandra_se.cc
  gen-cpp/Cassandra.cpp
  gen-cpp/cassandra_types.h
  gen-cpp/cassandra_types.cpp
  gen-cpp/cassandra_constants.h
  gen-cpp/cassandra_constants.cpp
  gen-cpp/Cassandra.h)

#INCLUDE_DIRECTORIES(BEFORE ${Boost_INCLUDE_DIRS})

# Location of the Thrift headers. A cache variable (defaulting to the original
# developer path) so other machines can override it on the cmake command line.
SET(THRIFT_INCLUDE_DIR "/home/psergey/cassandra/thrift/include/thrift/"
    CACHE PATH "Path to the Thrift C++ headers")
INCLUDE_DIRECTORIES(AFTER "${THRIFT_INCLUDE_DIR}")

# Thrift-generated code needs exceptions and implicit template instantiation;
# strip the flags that disable them. The expansion MUST be quoted: unquoted,
# a multi-word flags string is passed as several arguments and STRING(REPLACE)
# concatenates them without spaces, corrupting CMAKE_CXX_FLAGS.
STRING(REPLACE "-fno-exceptions" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
STRING(REPLACE "-fno-implicit-templates" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")

#LINK_DIRECTORIES(/home/psergey/cassandra/thrift/lib)
MYSQL_ADD_PLUGIN(cassandra ${cassandra_sources} STORAGE_ENGINE LINK_LIBRARIES thrift)
# was: STORAGE_ENGINE MANDATORY

271
storage/cassandra/cassandra_se.cc

@@ -0,0 +1,271 @@
// Cassandra includes:
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/time.h>
#include <stdio.h>
#include <stdarg.h>
#include "Thrift.h"
#include "transport/TSocket.h"
#include "transport/TTransport.h"
#include "transport/TBufferTransports.h"
#include "protocol/TProtocol.h"
#include "protocol/TBinaryProtocol.h"
#include "gen-cpp/Cassandra.h"
// cassandra includes end
#include "cassandra_se.h"
using namespace std;
using namespace apache::thrift;
using namespace apache::thrift::transport;
using namespace apache::thrift::protocol;
using namespace org::apache::cassandra;
/*
  Format an error message into err_buffer, from where ha_cassandra picks it
  up via error_str().
*/
void Cassandra_se_interface::print_error(const char *format, ...)
{
  va_list args;
  va_start(args, format);
  /* it's not a problem if output was truncated */
  vsnprintf(err_buffer, sizeof(err_buffer), format, args);
  va_end(args);
}
/*
  Implementation of connection to one Cassandra column family (ie., table)
*/
class Cassandra_se_impl: public Cassandra_se_interface
{
  CassandraClient *cass; /* Connection to cassandra */
  ConsistencyLevel::type cur_consistency_level;

  std::string column_family;  /* name of the CF this object is bound to */
  std::string keyspace;       /* keyspace the CF lives in */

  /* DDL checks */
  KsDef ks_def; /* KeySpace we're using (TODO: put this in table->share) */
  CfDef cf_def; /* Column family we're using (TODO: put in table->share)*/
  std::vector<ColumnDef>::iterator column_ddl_it;  /* cursor for next_ddl_column() */

  /* The list that was returned by the last key lookup */
  std::vector<ColumnOrSuperColumn> col_supercol_vec;

public:
  Cassandra_se_impl() : cass(NULL) {}
  virtual ~Cassandra_se_impl(){ delete cass; }

  /* Init: connect + set keyspace; returns true on failure (see err_buffer) */
  bool connect(const char *host, const char *keyspace);

  virtual void set_column_family(const char *cfname)
  {
    column_family.assign(cfname);
  }

  /* Writes: fields is a NULL-name-terminated (name,value) array */
  virtual bool insert(NameAndValue *fields);

  /* Reads */
  virtual bool get_slice(char *key, size_t key_len, NameAndValue *row, bool *found);

  /* Functions to enumerate ColumnFamily's DDL data */
  bool setup_ddl_checks();
  void first_ddl_column();
  bool next_ddl_column(char **name, int *name_len, char **value, int *value_len);
};
/* Factory: hand out a fresh connection object; the caller owns/deletes it */
Cassandra_se_interface *get_cassandra_se()
{
  Cassandra_se_impl *impl= new Cassandra_se_impl();
  return impl;
}
/*
  Run a Thrift call 'x', converting the known exception types into an error
  message in err_buffer. Exceptions are caught by const reference (the
  original caught by value, which copies and can slice the exception object).
*/
#define CASS_TRY(x) try { \
    x;  \
  } catch (const TTransportException &te) { \
    print_error("%s [%d]", te.what(), te.getType()); \
  } catch (const InvalidRequestException &ire) { \
    print_error("%s [%s]", ire.what(), ire.why.c_str()); \
  } catch (const NotFoundException &nfe) { \
    print_error("%s", nfe.what()); \
  } catch (...) { \
    print_error("Unknown Exception"); \
  }
bool Cassandra_se_impl::connect(const char *host, const char *keyspace_arg)
{
bool res= true;
keyspace.assign(keyspace_arg);
try {
boost::shared_ptr<TTransport> socket =
boost::shared_ptr<TSocket>(new TSocket(host, 9160));
boost::shared_ptr<TTransport> tr =
boost::shared_ptr<TFramedTransport>(new TFramedTransport (socket));
boost::shared_ptr<TProtocol> p =
boost::shared_ptr<TBinaryProtocol>(new TBinaryProtocol(tr));
cass= new CassandraClient(p);
tr->open();
cass->set_keyspace(keyspace_arg);
res= false; // success
}catch(TTransportException te){
print_error("%s [%d]", te.what(), te.getType());
}catch(InvalidRequestException ire){
print_error("%s [%s]", ire.what(), ire.why.c_str());
}catch(NotFoundException nfe){
print_error("%s", nfe.what());
}
catch(...) {
print_error("Unknown Exception");
}
// For now:
cur_consistency_level= ConsistencyLevel::ONE;
if (setup_ddl_checks())
res= true;
return res;
}
bool Cassandra_se_impl::setup_ddl_checks()
{
try {
cass->describe_keyspace(ks_def, keyspace);
std::vector<CfDef>::iterator it;
for (it= ks_def.cf_defs.begin(); it < ks_def.cf_defs.end(); it++)
{
cf_def= *it;
if (!cf_def.name.compare(column_family))
return false;
}
print_error("describe_keyspace() didn't return our column family");
} catch (InvalidRequestException ire) {
print_error("%s [%s]", ire.what(), ire.why.c_str());
} catch (NotFoundException nfe) {
print_error("keyspace not found: %s", nfe.what());
}
return true;
}
/* Reset the DDL cursor to the first column of the cached CfDef */
void Cassandra_se_impl::first_ddl_column()
{
  column_ddl_it= cf_def.column_metadata.begin();
}
bool Cassandra_se_impl::next_ddl_column(char **name, int *name_len,
char **type, int *type_len)
{
if (column_ddl_it == cf_def.column_metadata.end())
return true;
*name= (char*)(*column_ddl_it).name.c_str();
*name_len= (*column_ddl_it).name.length();
*type= (char*)(*column_ddl_it).validation_class.c_str();
*type_len= (*column_ddl_it).validation_class.length();
column_ddl_it++;
return false;
}
/*
  Write one row. 'fields' is a NULL-name-terminated array: the first element
  carries the row key in (value, value_len); the rest are regular columns.
  Returns false on success, true on any error.
*/
bool Cassandra_se_impl::insert(NameAndValue *fields)
{
  ColumnParent cparent;
  cparent.column_family= column_family;

  Column c;
  /* Cassandra orders writes by timestamp; use wall clock in milliseconds */
  struct timeval td;
  gettimeofday(&td, NULL);
  int64_t ms = td.tv_sec;
  ms = ms * 1000;
  int64_t usec = td.tv_usec;
  usec = usec / 1000;
  ms += usec;
  c.timestamp = ms;
  c.__isset.timestamp = true;

  /* First element is the row key */
  std::string key;
  key.assign(fields->value, fields->value_len);
  fields++;

  bool res= false;
  try {
    /* TODO: switch to batch_mutate(). Or, even to CQL? */

    // TODO: what should INSERT table (co1, col2) VALUES ('foo', 'bar') mean?
    // in SQL, it sets all columns.. what should it mean here? can we have
    // it to work only for specified columns? (if yes, what do for
    // VALUES()?)
    c.__isset.value= true;
    /* One insert() RPC per column; see the batch_mutate() TODO above */
    for(;fields->name; fields++)
    {
      c.name.assign(fields->name);
      c.value.assign(fields->value, fields->value_len);
      cass->insert(key, cparent, c, ConsistencyLevel::ONE);
    }
  } catch (...) {
    /* Any Thrift/transport failure maps to a generic failure here */
    res= true;
  }

  return res;
}
bool Cassandra_se_impl::get_slice(char *key, size_t key_len, NameAndValue *row, bool *found)
{
ColumnParent cparent;
cparent.column_family= column_family;
std::string rowkey_str;
rowkey_str.assign(key, key_len);
SlicePredicate slice_pred;
SliceRange sr;
sr.start = "";
sr.finish = "";
slice_pred.__set_slice_range(sr);
try {
std::vector<ColumnOrSuperColumn> &res= col_supercol_vec;
cass->get_slice(res, rowkey_str, cparent, slice_pred, ConsistencyLevel::ONE);
*found= true;
std::vector<ColumnOrSuperColumn>::iterator it;
if (res.size() == 0)
{
/*
No columns found. Cassandra doesn't allow records without any column =>
this means the seach key doesn't exist
*/
*found= false;
return false;
}
for (it= res.begin(); it < res.end(); it++)
{
ColumnOrSuperColumn cs= *it;
if (!cs.__isset.column)
return true;
row->name= (char*)cs.column.name.c_str();
row->value= (char*)cs.column.value.c_str();
row->value_len= cs.column.value.length();
row++;
}
row->name= NULL;
} catch (InvalidRequestException ire) {
return true;
} catch (UnavailableException ue) {
return true;
} catch (TimedOutException te) {
return true;
}
return false;
}

57
storage/cassandra/cassandra_se.h

@@ -0,0 +1,57 @@
/*
This file is a "bridge" interface between cassandra+Thrift and MariaDB.
It is #included by both sides, so it must itself include neither (including
both together causes compile errors due to conflicts).
*/
/*
Storage for (name,value) pairs. name==NULL means 'non-object'.
This should be used for
- shipping data from sql to cassandra for INSERTs
- shipping data from cassandra to SQL for record reads.
*/
/*
  One (name, value) pair. Used in arrays terminated by an element with
  name == NULL. 'value' is a byte buffer of 'value_len' bytes (not
  necessarily NUL-terminated).
*/
class NameAndValue
{
public:
  char *name;
  char *value;
  size_t value_len;
};
/*
  Interface to one cassandra column family, i.e. one 'table'
*/
class Cassandra_se_interface
{
public:
  Cassandra_se_interface() { err_buffer[0]=0; }
  virtual ~Cassandra_se_interface(){};

  /*
    Init: connect to the given host and switch to the given keyspace.
    (fix vs. original: the second parameter was misleadingly named 'port';
    the implementation and all callers pass the keyspace name.)
    Returns true on failure; the message is available via error_str().
  */
  virtual bool connect(const char *host, const char *keyspace)=0;
  virtual void set_column_family(const char *cfname) = 0;

  /* Check underlying DDL */
  virtual bool setup_ddl_checks()=0;
  virtual void first_ddl_column()=0;
  virtual bool next_ddl_column(char **name, int *name_len, char **value,
                               int *value_len)=0;

  /*
    Writes: 'fields' is a NULL-name-terminated array; the first element
    holds the row key. Returns true on failure.
  */
  virtual bool insert(NameAndValue *fields)=0;

  /*
    Reads: fill 'row' with the row's columns; *found is set to false when
    the key does not exist. Returns true on failure.
  */
  virtual bool get_slice(char *key, size_t key_len, NameAndValue *row, bool *found)=0 ;

  /* Passing error messages up to ha_cassandra */
  char err_buffer[512];
  const char *error_str() { return err_buffer; }
  void print_error(const char *format, ...);
};

/* Factory; the caller owns the returned object and must delete it */
Cassandra_se_interface *get_cassandra_se();

12871
storage/cassandra/gen-cpp/Cassandra.cpp
File diff suppressed because it is too large
View File

5466
storage/cassandra/gen-cpp/Cassandra.h
File diff suppressed because it is too large
View File

219
storage/cassandra/gen-cpp/Cassandra_server.skeleton.cpp

@@ -0,0 +1,219 @@
// This autogenerated skeleton file illustrates how to build a server.
// You should copy it to another filename to avoid overwriting it.
#include "Cassandra.h"
#include <protocol/TBinaryProtocol.h>
#include <server/TSimpleServer.h>
#include <transport/TServerSocket.h>
#include <transport/TBufferTransports.h>
using namespace ::apache::thrift;
using namespace ::apache::thrift::protocol;
using namespace ::apache::thrift::transport;
using namespace ::apache::thrift::server;
using boost::shared_ptr;
using namespace ::org::apache::cassandra;
/*
  Thrift-autogenerated server skeleton: every RPC below is an empty stub
  that only prints its own name. This file is not used by the storage
  engine itself; it is kept as a reference listing of the server-side
  Cassandra API surface.
*/
class CassandraHandler : virtual public CassandraIf {
 public:
  CassandraHandler() {
    // Your initialization goes here
  }

  void login(const AuthenticationRequest& auth_request) {
    // Your implementation goes here
    printf("login\n");
  }

  void set_keyspace(const std::string& keyspace) {
    // Your implementation goes here
    printf("set_keyspace\n");
  }

  void get(ColumnOrSuperColumn& _return, const std::string& key, const ColumnPath& column_path, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get\n");
  }

  void get_slice(std::vector<ColumnOrSuperColumn> & _return, const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get_slice\n");
  }

  int32_t get_count(const std::string& key, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get_count\n");
  }

  void multiget_slice(std::map<std::string, std::vector<ColumnOrSuperColumn> > & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("multiget_slice\n");
  }

  void multiget_count(std::map<std::string, int32_t> & _return, const std::vector<std::string> & keys, const ColumnParent& column_parent, const SlicePredicate& predicate, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("multiget_count\n");
  }

  void get_range_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const SlicePredicate& predicate, const KeyRange& range, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get_range_slices\n");
  }

  void get_paged_slice(std::vector<KeySlice> & _return, const std::string& column_family, const KeyRange& range, const std::string& start_column, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get_paged_slice\n");
  }

  void get_indexed_slices(std::vector<KeySlice> & _return, const ColumnParent& column_parent, const IndexClause& index_clause, const SlicePredicate& column_predicate, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("get_indexed_slices\n");
  }

  void insert(const std::string& key, const ColumnParent& column_parent, const Column& column, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("insert\n");
  }

  void add(const std::string& key, const ColumnParent& column_parent, const CounterColumn& column, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("add\n");
  }

  void remove(const std::string& key, const ColumnPath& column_path, const int64_t timestamp, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("remove\n");
  }

  void remove_counter(const std::string& key, const ColumnPath& path, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("remove_counter\n");
  }

  void batch_mutate(const std::map<std::string, std::map<std::string, std::vector<Mutation> > > & mutation_map, const ConsistencyLevel::type consistency_level) {
    // Your implementation goes here
    printf("batch_mutate\n");
  }

  void truncate(const std::string& cfname) {
    // Your implementation goes here
    printf("truncate\n");
  }

  void describe_schema_versions(std::map<std::string, std::vector<std::string> > & _return) {
    // Your implementation goes here
    printf("describe_schema_versions\n");
  }

  void describe_keyspaces(std::vector<KsDef> & _return) {
    // Your implementation goes here
    printf("describe_keyspaces\n");
  }

  void describe_cluster_name(std::string& _return) {
    // Your implementation goes here
    printf("describe_cluster_name\n");
  }

  void describe_version(std::string& _return) {
    // Your implementation goes here
    printf("describe_version\n");
  }

  void describe_ring(std::vector<TokenRange> & _return, const std::string& keyspace) {
    // Your implementation goes here
    printf("describe_ring\n");
  }

  void describe_token_map(std::map<std::string, std::string> & _return) {
    // Your implementation goes here
    printf("describe_token_map\n");
  }

  void describe_partitioner(std::string& _return) {
    // Your implementation goes here
    printf("describe_partitioner\n");
  }

  void describe_snitch(std::string& _return) {
    // Your implementation goes here
    printf("describe_snitch\n");
  }

  void describe_keyspace(KsDef& _return, const std::string& keyspace) {
    // Your implementation goes here
    printf("describe_keyspace\n");
  }

  void describe_splits(std::vector<std::string> & _return, const std::string& cfName, const std::string& start_token, const std::string& end_token, const int32_t keys_per_split) {
    // Your implementation goes here
    printf("describe_splits\n");
  }

  void system_add_column_family(std::string& _return, const CfDef& cf_def) {
    // Your implementation goes here
    printf("system_add_column_family\n");
  }

  void system_drop_column_family(std::string& _return, const std::string& column_family) {
    // Your implementation goes here
    printf("system_drop_column_family\n");
  }

  void system_add_keyspace(std::string& _return, const KsDef& ks_def) {
    // Your implementation goes here
    printf("system_add_keyspace\n");
  }

  void system_drop_keyspace(std::string& _return, const std::string& keyspace) {
    // Your implementation goes here
    printf("system_drop_keyspace\n");
  }

  void system_update_keyspace(std::string& _return, const KsDef& ks_def) {
    // Your implementation goes here
    printf("system_update_keyspace\n");
  }

  void system_update_column_family(std::string& _return, const CfDef& cf_def) {
    // Your implementation goes here
    printf("system_update_column_family\n");
  }

  void execute_cql_query(CqlResult& _return, const std::string& query, const Compression::type compression) {
    // Your implementation goes here
    printf("execute_cql_query\n");
  }

  void prepare_cql_query(CqlPreparedResult& _return, const std::string& query, const Compression::type compression) {
    // Your implementation goes here
    printf("prepare_cql_query\n");
  }

  void execute_prepared_cql_query(CqlResult& _return, const int32_t itemId, const std::vector<std::string> & values) {
    // Your implementation goes here
    printf("execute_prepared_cql_query\n");
  }

  void set_cql_version(const std::string& version) {
    // Your implementation goes here
    printf("set_cql_version\n");
  }
};
/*
  Skeleton entry point: serve the stub handler with a single-threaded
  TSimpleServer on port 9090. Note it uses a buffered (not framed)
  transport, unlike the client side in cassandra_se.cc.
*/
int main(int argc, char **argv) {
  int port = 9090;
  shared_ptr<CassandraHandler> handler(new CassandraHandler());
  shared_ptr<TProcessor> processor(new CassandraProcessor(handler));
  shared_ptr<TServerTransport> serverTransport(new TServerSocket(port));
  shared_ptr<TTransportFactory> transportFactory(new TBufferedTransportFactory());
  shared_ptr<TProtocolFactory> protocolFactory(new TBinaryProtocolFactory());

  TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory);
  server.serve();
  return 0;
}

18
storage/cassandra/gen-cpp/cassandra_constants.cpp

@@ -0,0 +1,18 @@
/**
* Autogenerated by Thrift Compiler (0.8.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
#include "cassandra_constants.h"
namespace org { namespace apache { namespace cassandra {

/* Single global instance holding the Thrift API version constant */
const cassandraConstants g_cassandra_constants;

cassandraConstants::cassandraConstants() {
  /* Version of the Cassandra Thrift interface this code was generated from */
  cassandra_const_VERSION = "19.32.0";
}

}}} // namespace

26
storage/cassandra/gen-cpp/cassandra_constants.h

@@ -0,0 +1,26 @@
/**
* Autogenerated by Thrift Compiler (0.8.0)
*
* DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
* @generated
*/
#ifndef cassandra_CONSTANTS_H
#define cassandra_CONSTANTS_H
#include "cassandra_types.h"
namespace org { namespace apache { namespace cassandra {
class cassandraConstants {
 public:
  cassandraConstants();

  /*
    Thrift API version string, set by the constructor.
    Originally "std::string VERSION"; hand-renamed (presumably to avoid a
    macro clash — confirm). Declared const char* because the constructor
    assigns a string literal; binding a literal to a mutable char* is
    deprecated and ill-formed in modern C++.
  */
  const char* cassandra_const_VERSION;
};
extern const cassandraConstants g_cassandra_constants;
}}} // namespace
#endif

3512
storage/cassandra/gen-cpp/cassandra_types.cpp
File diff suppressed because it is too large
View File

2149
storage/cassandra/gen-cpp/cassandra_types.h
File diff suppressed because it is too large
View File

727
storage/cassandra/ha_cassandra.cc

@@ -0,0 +1,727 @@
/*
MP AB copyrights
*/
#ifdef USE_PRAGMA_IMPLEMENTATION
#pragma implementation // gcc: Class implementation
#endif
#include <mysql/plugin.h>
#include "ha_cassandra.h"
#include "sql_class.h"
static handler *cassandra_create_handler(handlerton *hton,
TABLE_SHARE *table,
MEM_ROOT *mem_root);
handlerton *cassandra_hton;
/*
Hash used to track the number of open tables; variable for example share
methods
*/
static HASH cassandra_open_tables;
/* The mutex used to init the hash; variable for example share methods */
mysql_mutex_t cassandra_mutex;
/**
  @brief
  Structure for CREATE TABLE options (table options).
  It needs to be called ha_table_option_struct.

  The option values can be specified in the CREATE TABLE at the end:
  CREATE TABLE ( ... ) *here*
*/
struct ha_table_option_struct
{
  const char *host;           /* 'thrift_host'   — Cassandra Thrift endpoint */
  const char *keyspace;       /* 'keyspace'      — Cassandra keyspace name   */
  const char *column_family;  /* 'column_family' — CF this table maps to     */
};

/* CREATE TABLE options the engine accepts (all plain strings) */
ha_create_table_option cassandra_table_option_list[]=
{
  /*
    one option that takes an arbitrary string
  */
  HA_TOPTION_STRING("thrift_host", host),
  HA_TOPTION_STRING("keyspace", keyspace),
  HA_TOPTION_STRING("column_family", column_family),
  HA_TOPTION_END
};
/**
  @brief
  Hash callback: the lookup key of a CASSANDRA_SHARE is its table name.
*/
static uchar* cassandra_get_key(CASSANDRA_SHARE *share, size_t *length,
                                my_bool not_used __attribute__((unused)))
{
  *length= share->table_name_length;
  return reinterpret_cast<uchar*>(share->table_name);
}
#ifdef HAVE_PSI_INTERFACE
/* Performance-schema instrumentation keys for this engine's mutexes */
static PSI_mutex_key ex_key_mutex_example, ex_key_mutex_CASSANDRA_SHARE_mutex;

static PSI_mutex_info all_cassandra_mutexes[]=
{
  { &ex_key_mutex_example, "cassandra", PSI_FLAG_GLOBAL},
  { &ex_key_mutex_CASSANDRA_SHARE_mutex, "CASSANDRA_SHARE::mutex", 0}
};

/* Register the mutex keys with the P_S server, if one is compiled in */
static void init_cassandra_psi_keys()
{
  const char* category= "cassandra";
  int count;

  /* No performance-schema server => nothing to register */
  if (PSI_server == NULL)
    return;

  count= array_elements(all_cassandra_mutexes);
  PSI_server->register_mutex(category, all_cassandra_mutexes, count);
}
#endif
/* Plugin init: set up P_S keys, the open-tables hash/mutex, and the hton */
static int cassandra_init_func(void *p)
{
  DBUG_ENTER("cassandra_init_func");

#ifdef HAVE_PSI_INTERFACE
  init_cassandra_psi_keys();
#endif

  cassandra_hton= (handlerton *)p;
  mysql_mutex_init(ex_key_mutex_example, &cassandra_mutex, MY_MUTEX_INIT_FAST);
  (void) my_hash_init(&cassandra_open_tables,system_charset_info,32,0,0,
                      (my_hash_get_key) cassandra_get_key,0,0);

  cassandra_hton->state= SHOW_OPTION_YES;
  cassandra_hton->create= cassandra_create_handler;
  cassandra_hton->flags= HTON_CAN_RECREATE;
  cassandra_hton->table_options= cassandra_table_option_list;
  //cassandra_hton->field_options= example_field_option_list;
  cassandra_hton->field_options= NULL;

  DBUG_RETURN(0);
}
static int cassandra_done_func(void *p)
{
int error= 0;
DBUG_ENTER("cassandra_done_func");
if (cassandra_open_tables.records)
error= 1;
my_hash_free(&cassandra_open_tables);
mysql_mutex_destroy(&cassandra_mutex);
DBUG_RETURN(error);
}
/**
  @brief
  Look up (or create) the CASSANDRA_SHARE for a table and take a reference
  on it. The share carries the THR_LOCK and per-table mutex.

  @return the share, or NULL on out-of-memory / hash-insert failure.

  Bug fixes vs. original: the lock and mutex are initialized BEFORE the
  share is published in the hash, so the error path no longer destroys an
  uninitialized mutex; and the error path now releases cassandra_mutex
  (the original returned with it still held, deadlocking the next caller).
*/
static CASSANDRA_SHARE *get_share(const char *table_name, TABLE *table)
{
  CASSANDRA_SHARE *share;
  uint length;
  char *tmp_name;

  mysql_mutex_lock(&cassandra_mutex);
  length=(uint) strlen(table_name);

  if (!(share=(CASSANDRA_SHARE*) my_hash_search(&cassandra_open_tables,
                                                (uchar*) table_name,
                                                length)))
  {
    /* Share and its name string are allocated in one block */
    if (!(share=(CASSANDRA_SHARE *)
          my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
                          &share, sizeof(*share),
                          &tmp_name, length+1,
                          NullS)))
    {
      mysql_mutex_unlock(&cassandra_mutex);
      return NULL;
    }

    share->use_count=0;
    share->table_name_length=length;
    share->table_name=tmp_name;
    strmov(share->table_name,table_name);
    thr_lock_init(&share->lock);
    mysql_mutex_init(ex_key_mutex_CASSANDRA_SHARE_mutex,
                     &share->mutex, MY_MUTEX_INIT_FAST);
    if (my_hash_insert(&cassandra_open_tables, (uchar*) share))
      goto error;
  }
  share->use_count++;
  mysql_mutex_unlock(&cassandra_mutex);

  return share;

error:
  thr_lock_delete(&share->lock);
  mysql_mutex_destroy(&share->mutex);
  my_free(share);
  mysql_mutex_unlock(&cassandra_mutex);
  return NULL;
}
/**
  @brief
  Drop one reference to a share; when the last reference goes away, remove
  it from the registry and release its lock, mutex and memory.
*/
static int free_share(CASSANDRA_SHARE *share)
{
  mysql_mutex_lock(&cassandra_mutex);
  share->use_count--;
  if (share->use_count == 0)
  {
    /* Last user gone: tear the share down completely */
    my_hash_delete(&cassandra_open_tables, (uchar*) share);
    thr_lock_delete(&share->lock);
    mysql_mutex_destroy(&share->mutex);
    my_free(share);
  }
  mysql_mutex_unlock(&cassandra_mutex);

  return 0;
}
/* Handlerton callback: construct a handler instance on the given MEM_ROOT */
static handler* cassandra_create_handler(handlerton *hton,
                                         TABLE_SHARE *table,
                                         MEM_ROOT *mem_root)
{
  ha_cassandra *h= new (mem_root) ha_cassandra(hton, table);
  return h;
}
ha_cassandra::ha_cassandra(handlerton *hton, TABLE_SHARE *table_arg)
  :handler(hton, table_arg),
   se(NULL), names_and_vals(NULL)
{}

/* The engine stores no local data files, so there are no file extensions */
static const char *ha_cassandra_exts[] = {
  NullS
};

const char **ha_cassandra::bas_ext() const
{
  return ha_cassandra_exts;
}
int ha_cassandra::open(const char *name, int mode, uint test_if_locked)
{
DBUG_ENTER("ha_cassandra::open");
if (!(share = get_share(name, table)))
DBUG_RETURN(1);
thr_lock_data_init(&share->lock,&lock,NULL);
ha_table_option_struct *options= table->s->option_struct;
fprintf(stderr, "ha_cass: open thrift_host=%s keyspace=%s column_family=%s\n",
options->host, options->keyspace, options->column_family);
DBUG_ASSERT(!se);
if (!options->host || !options->keyspace || !options->column_family)
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
se= get_cassandra_se();
se->set_column_family(options->column_family);
if (se->connect(options->host, options->keyspace))
{
my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), se->error_str());
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
DBUG_RETURN(0);
}
/* Close the table: drop the Cassandra connection, buffers and the share */
int ha_cassandra::close(void)
{
  DBUG_ENTER("ha_cassandra::close");
  delete se;
  se= NULL;
  /* Release the scratch (name,value) array, if it was ever allocated */
  if (names_and_vals != NULL)
  {
    my_free(names_and_vals);
    names_and_vals= NULL;
  }
  DBUG_RETURN(free_share(share));
}
/**
@brief
create() is called to create a database. The variable name will have the name
of the table.
@details
When create() is called you do not need to worry about
opening the table. Also, the .frm file will have already been
created so adjusting create_info is not necessary. You can overwrite
the .frm file at this point if you wish to change the table
definition, but there are no methods currently provided for doing
so.
Called from handle.cc by ha_create_table().
@see
ha_create_table() in handle.cc
*/
int ha_cassandra::create(const char *name, TABLE *table_arg,
HA_CREATE_INFO *create_info)
{
ha_table_option_struct *options= table_arg->s->option_struct;
DBUG_ENTER("ha_cassandra::create");
DBUG_ASSERT(options);
//psergey-todo: This is called for CREATE TABLE... check options here.
/*
if (table_arg->s->fields != 2)
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0), "The table must have two fields");
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
*/
Field **pfield= table_arg->s->field;
if (strcmp((*pfield)->field_name, "rowkey"))
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0), "First column must be named 'rowkey'");
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
if (table_arg->s->keys != 1 || table_arg->s->primary_key !=0 ||
table_arg->key_info[0].key_parts != 1 ||
table_arg->key_info[0].key_part[0].fieldnr != 1)
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0), "Table must have one PRIMARY KEY(rowkey)");
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
/*
pfield++;
if (strcmp((*pfield)->field_name, "data"))
{
my_error(ER_WRONG_COLUMN_NAME, MYF(0), "Second column must be named 'data'");
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
}
*/
#ifndef DBUG_OFF
/*
DBUG_PRINT("info", ("strparam: '%-.64s' ullparam: %llu enumparam: %u "\
"boolparam: %u",
(options->strparam ? options->strparam : "<NULL>"),
options->ullparam, options->enumparam, options->boolparam));
psergey-todo: check table definition!
for (Field **field= table_arg->s->field; *field; field++)
{
ha_field_option_struct *field_options= (*field)->option_struct;
DBUG_ASSERT(field_options);
DBUG_PRINT("info", ("field: %s complex: '%-.64s'",
(*field)->field_name,
(field_options->complex_param_to_parse_it_in_engine ?
field_options->complex_param_to_parse_it_in_engine :
"<NULL>")));
}
*/
#endif
DBUG_ASSERT(!se);
if (!options->host || !options->keyspace || !options->column_family)
DBUG_RETURN(HA_WRONG_CREATE_OPTION);
se= get_cassandra_se();
se->set_column_family(options->column_family);
if (se->connect(options->host, options->keyspace))
{
my_error(ER_CONNECT_TO_FOREIGN_DATA_SOURCE, MYF(0), se->error_str());
DBUG_RETURN(HA_ERR_NO_CONNECTION);
}
/*
TODO: what about mapping the primary key? It has a 'type', too...
see CfDef::key_validation_class ? see also CfDef::key_alias?
*/
se->first_ddl_column();
char *col_name;
int col_name_len;
char *col_type;
int col_type_len;
while (!se->next_ddl_column(&col_name, &col_name_len, &col_type,
&col_type_len))
{
/* Mapping for the 1st field is already known */
for (Field **field= table_arg->s->field + 1; *field; field++)
{
if (!strcmp((*field)->field_name, col_name))
{
//map_field_to_type(field, col_type);
}
}
}
DBUG_RETURN(0);
}
/*
Mapping needs to
- copy value from MySQL record to Thrift buffer
- copy value from Thrift bufer to MySQL record..
*/
/*
  Fully-qualified names of Cassandra validator (column type) classes, as
  reported via Thrift column metadata. Used to match Cassandra column types
  against MySQL field types in map_field_to_type().
*/
const char * const validator_bigint="org.apache.cassandra.db.marshal.LongType";
const char * const validator_int="org.apache.cassandra.db.marshal.Int32Type";
const char * const validator_counter= "org.apache.cassandra.db.marshal.CounterColumnType";
const char * const validator_float= "org.apache.cassandra.db.marshal.FloatType";
const char * const validator_double= "org.apache.cassandra.db.marshal.DoubleType";
/*
  Check a MySQL field type against the Cassandra validator (column type)
  name of the column it is mapped to.

  @param field           MySQL field from the table definition
  @param validator_name  Fully-qualified Cassandra validator class name

  NOTE(review): this is placeholder code — the "setup ... validator" branch
  is empty, and the switch relies on implicit fall-through:
  - a MYSQL_TYPE_FLOAT field whose validator is not FloatType falls through
    to the MYSQL_TYPE_DOUBLE case and is silently accepted if the validator
    is DoubleType;
  - any combination not matched above reaches DBUG_ASSERT(0).
  Confirm whether these fall-throughs are intended before relying on them.
*/
void map_field_to_type(Field *field, const char *validator_name)
{
  switch(field->type()) {
    case MYSQL_TYPE_TINY:
    case MYSQL_TYPE_SHORT:
    case MYSQL_TYPE_LONG:
    case MYSQL_TYPE_LONGLONG:
      if (!strcmp(validator_name, validator_bigint))
      {
        //setup bigint validator
      }
      break;
    case MYSQL_TYPE_FLOAT:
      if (!strcmp(validator_name, validator_float))
        break;
      /* fall through — presumably intentional; TODO confirm */
    case MYSQL_TYPE_DOUBLE:
      if (!strcmp(validator_name, validator_double))
        break;
      /* fall through to assert on any unhandled combination */
    default:
      DBUG_ASSERT(0);
  }
}
void store_key_image_to_rec(Field *field, uchar *ptr, uint len);
/*
  Exact-match index lookup on the primary key ('rowkey' column).

  The key image is unpacked into field[0] of the record, converted to a
  string, and used as the Cassandra row key for a get_slice() call.

  @param buf          Output record buffer (values not yet unpacked — TODO)
  @param key          Packed search key
  @param keypart_map  Which key parts are present
  @param find_flag    Search mode; only HA_READ_KEY_EXACT is supported

  @return 0 on hit, HA_ERR_KEY_NOT_FOUND on miss,
          HA_ERR_WRONG_COMMAND for unsupported search modes,
          HA_ERR_INTERNAL_ERROR on Thrift/session failure.
*/
int ha_cassandra::index_read_map(uchar *buf, const uchar *key,
                                 key_part_map keypart_map,
                                 enum ha_rkey_function find_flag)
{
  int rc;
  DBUG_ENTER("ha_cassandra::index_read_map");

  /* Only exact key lookups are supported for now. */
  if (find_flag != HA_READ_KEY_EXACT)
    DBUG_RETURN(HA_ERR_WRONG_COMMAND);

  // todo: decode the search key.
  uint key_len= calculate_key_len(table, active_index, key, keypart_map);
  store_key_image_to_rec(table->field[0], (uchar*)key, key_len);

  char buff[256];
  String tmp(buff,sizeof(buff), &my_charset_bin);
  tmp.length(0);
  String *str;
  str= table->field[0]->val_str(&tmp);

  bool found;
  if (se->get_slice((char*)str->ptr(), str->length(), get_names_and_vals(), &found))
    rc= HA_ERR_INTERNAL_ERROR;
  else
  {
    if (found)
    {
      /*
        Bug fix: rc was left uninitialized on this path, so a successful
        lookup returned an indeterminate error code. The row exists;
        report success.
        TODO: walk through the (name, value) pairs and unpack them into buf.
      */
      rc= 0;
    }
    else
      rc= HA_ERR_KEY_NOT_FOUND;
  }

#ifdef NEW_CODE
  se->get_slice();

  for each column
  {
    find column;
  }
#endif
  DBUG_RETURN(rc);
}
/*
  Insert one row: serialize every field of the current record into a
  (name, value) pair and hand the NULL-name-terminated tuple to the
  Cassandra session layer.

  @param buf  Record buffer (fields are read via Field::val_str())

  @return 0 on success, HA_ERR_OUT_OF_MEM if a value buffer could not be
          allocated, HA_ERR_INTERNAL_ERROR if the insert call fails.
*/
int ha_cassandra::write_row(uchar *buf)
{
  my_bitmap_map *old_map;
  char buff[512];
  NameAndValue *tuple;
  NameAndValue *nv;
  DBUG_ENTER("ha_cassandra::write_row");

  /* Temporary malloc-happy code just to get INSERTs to work */
  nv= tuple= get_names_and_vals();
  old_map= dbug_tmp_use_all_columns(table, table->read_set);
  for (Field **field= table->field; *field; field++, nv++)
  {
    String tmp(buff,sizeof(buff), &my_charset_bin);
    tmp.length(0);
    String *str;
    str= (*field)->val_str(&tmp);
    nv->name= (char*)(*field)->field_name;
    nv->value_len= str->length();
    nv->value= (char*)my_malloc(nv->value_len, MYF(0));
    if (!nv->value)
    {
      /*
        Bug fix: allocation failure used to crash in memcpy(). Terminate
        the tuple here, free what was already allocated, and report OOM.
      */
      nv->name= NULL;
      dbug_tmp_restore_column_map(table->read_set, old_map);
      for (nv= tuple; nv->name; nv++)
        my_free(nv->value);
      DBUG_RETURN(HA_ERR_OUT_OF_MEM);
    }
    memcpy(nv->value, str->ptr(), nv->value_len);
  }
  nv->name= NULL;
  dbug_tmp_restore_column_map(table->read_set, old_map);

  //invoke!
  bool res= se->insert(tuple);

  /* Free the per-value buffers regardless of the insert outcome. */
  for (nv= tuple; nv->name; nv++)
  {
    my_free(nv->value);
  }

  DBUG_RETURN(res? HA_ERR_INTERNAL_ERROR: 0);
}
/*
  Return the lazily-allocated, per-handler array of (name, value) pairs,
  sized for one entry per table field plus a NULL-name terminator.

  @return the zero-initialized array, or NULL on out-of-memory.
          (Bug fix: the original passed a bare 0 as the myf flags argument
          and memset a NULL pointer when my_malloc failed.)
*/
NameAndValue *ha_cassandra::get_names_and_vals()
{
  if (names_and_vals)
    return names_and_vals;
  else
  {
    size_t size= sizeof(NameAndValue) * (table->s->fields + 1);
    if ((names_and_vals= (NameAndValue*)my_malloc(size, MYF(0))))
      memset(names_and_vals, 0, size);
    return names_and_vals;
  }
}
/////////////////////////////////////////////////////////////////////////////
// Dummy implementations start
/////////////////////////////////////////////////////////////////////////////
/* Stub: ordered index scans are not implemented yet. */
int ha_cassandra::index_next(uchar *buf)
{
  DBUG_ENTER("ha_cassandra::index_next");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/* Stub: backward index scans are not implemented yet. */
int ha_cassandra::index_prev(uchar *buf)
{
  DBUG_ENTER("ha_cassandra::index_prev");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/* Stub: positioning at the first index entry is not implemented yet. */
int ha_cassandra::index_first(uchar *buf)
{
  DBUG_ENTER("ha_cassandra::index_first");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/* Stub: positioning at the last index entry is not implemented yet. */
int ha_cassandra::index_last(uchar *buf)
{
  DBUG_ENTER("ha_cassandra::index_last");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_cassandra::rnd_init(bool scan)
{
  DBUG_ENTER("ha_cassandra::rnd_init");
  /* Nothing to prepare: rnd_next() is a stub that reports end-of-file. */
  DBUG_RETURN(0);
}
int ha_cassandra::rnd_end()
{
  DBUG_ENTER("ha_cassandra::rnd_end");
  /* No scan state to release (see rnd_init()). */
  DBUG_RETURN(0);
}
/* Stub: full table scans currently return no rows at all. */
int ha_cassandra::rnd_next(uchar *buf)
{
  DBUG_ENTER("ha_cassandra::rnd_next");
  DBUG_RETURN(HA_ERR_END_OF_FILE);
}
void ha_cassandra::position(const uchar *record)
{
  DBUG_ENTER("ha_cassandra::position");
  /* Stub: no row position is recorded; rnd_pos() is unsupported anyway. */
  DBUG_VOID_RETURN;
}
/* Stub: fetching a row by saved position is not implemented yet. */
int ha_cassandra::rnd_pos(uchar *buf, uchar *pos)
{
  DBUG_ENTER("ha_cassandra::rnd_pos");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/* Optimizer estimate of rows in a key range; fixed value for now. */
ha_rows ha_cassandra::records_in_range(uint inx, key_range *min_key,
                                       key_range *max_key)
{
  DBUG_ENTER("ha_cassandra::records_in_range");
  DBUG_RETURN(10);                                 // low number to force index usage
}
/* Stub: UPDATE is not implemented yet. */
int ha_cassandra::update_row(const uchar *old_data, uchar *new_data)
{
  DBUG_ENTER("ha_cassandra::update_row");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
int ha_cassandra::info(uint flag)
{
  DBUG_ENTER("ha_cassandra::info");
  /* Stub: no statistics are filled in yet. */
  DBUG_RETURN(0);
}
int ha_cassandra::extra(enum ha_extra_function operation)
{
  DBUG_ENTER("ha_cassandra::extra");
  /* All extra-operation hints are ignored. */
  DBUG_RETURN(0);
}
/*
  Standard handler lock negotiation: if the requested lock type is real
  (not TL_IGNORE) and no lock is held yet, remember the requested type,
  then append our THR_LOCK_DATA to the caller's list.
*/
THR_LOCK_DATA **ha_cassandra::store_lock(THD *thd,
                                         THR_LOCK_DATA **to,
                                         enum thr_lock_type lock_type)
{
  if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK)
    lock.type=lock_type;
  *to++= &lock;
  return to;
}
int ha_cassandra::external_lock(THD *thd, int lock_type)
{
  DBUG_ENTER("ha_cassandra::external_lock");
  /* No external locking is needed; always succeed. */
  DBUG_RETURN(0);
}
int ha_cassandra::delete_table(const char *name)
{
  DBUG_ENTER("ha_cassandra::delete_table");
  /*
    Not implemented, but report success so that callers (e.g. DROP TABLE)
    proceed as if it worked. Nothing is removed on the Cassandra side.
  */
  DBUG_RETURN(0);
}
/* Stub: DELETE of individual rows is not implemented yet. */
int ha_cassandra::delete_row(const uchar *buf)
{
  DBUG_ENTER("ha_cassandra::delete_row");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/* Stub: TRUNCATE / delete-all is not implemented yet. */
int ha_cassandra::delete_all_rows()
{
  DBUG_ENTER("ha_cassandra::delete_all_rows");
  DBUG_RETURN(HA_ERR_WRONG_COMMAND);
}
/**
  check_if_incompatible_data() called if ALTER TABLE can't detect otherwise
  if new and old definition are compatible

  @details If there are no other explicit signs like changed number of
  fields this function will be called by compare_tables()
  (sql/sql_tables.cc) to decide should we rewrite whole table or only .frm
  file.
*/
bool ha_cassandra::check_if_incompatible_data(HA_CREATE_INFO *info,
                                              uint table_changes)
{
  //ha_table_option_struct *param_old, *param_new;
  DBUG_ENTER("ha_cassandra::check_if_incompatible_data");
  /* Engine-specific options are not compared yet; always claim compatible. */
  DBUG_RETURN(COMPATIBLE_DATA_YES);
}
/////////////////////////////////////////////////////////////////////////////
// Dummy implementations end
/////////////////////////////////////////////////////////////////////////////
/* System variables exposed by the plugin (none yet; placeholders kept). */
static struct st_mysql_sys_var* cassandra_system_variables[]= {
//  MYSQL_SYSVAR(enum_var),
//  MYSQL_SYSVAR(ulong_var),
  NULL
};
/* Storage engine descriptor required by the plugin API. */
struct st_mysql_storage_engine cassandra_storage_engine=
{ MYSQL_HANDLERTON_INTERFACE_VERSION };
/* Status variables reported via SHOW STATUS (none yet). */
static struct st_mysql_show_var func_status[]=
{
//  {"example_func_example", (char *)show_func_example, SHOW_FUNC},
  {0,0,SHOW_UNDEF}
};
/* Plugin declaration: registers the CASSANDRA storage engine with MariaDB. */
maria_declare_plugin(cassandra)
{
  MYSQL_STORAGE_ENGINE_PLUGIN,
  &cassandra_storage_engine,
  "CASSANDRA",
  "Monty Program Ab",
  "Cassandra storage engine",
  PLUGIN_LICENSE_GPL,
  cassandra_init_func,                            /* Plugin Init */
  cassandra_done_func,                            /* Plugin Deinit */
  0x0001,                                         /* version number (0.1) */
  func_status,                                    /* status variables */
  cassandra_system_variables,                     /* system variables */
  "0.1",                                          /* string version */
  MariaDB_PLUGIN_MATURITY_EXPERIMENTAL            /* maturity */
}
maria_declare_plugin_end;

218
storage/cassandra/ha_cassandra.h

@ -0,0 +1,218 @@
/*
MP AB copyrights
*/
#ifdef USE_PRAGMA_INTERFACE
#pragma interface /* gcc class implementation */
#endif
#include "my_global.h" /* ulonglong */
#include "thr_lock.h" /* THR_LOCK, THR_LOCK_DATA */
#include "handler.h" /* handler */
#include "my_base.h" /* ha_rows */
#include "cassandra_se.h"
/** @brief
  CASSANDRA_SHARE is a structure that will be shared among all open handlers.
  This example implements the minimum of what you will probably need.
*/
typedef struct st_cassandra_share {
  char *table_name;                   // normalized table name (hash key)
  uint table_name_length,use_count;   // name length; open-handler refcount
  mysql_mutex_t mutex;                // presumably guards use_count — TODO confirm against share code
  THR_LOCK lock;                      // table-level lock used by store_lock()
} CASSANDRA_SHARE;
/** @brief
  Class definition for the Cassandra storage engine handler.
*/
class ha_cassandra: public handler
{
  THR_LOCK_DATA lock;          ///< MySQL lock
  CASSANDRA_SHARE *share;      ///< Shared lock info
  Cassandra_se_interface *se;  ///< Thrift session object; owned and deleted by this handler

  /* pre-allocated array of #fields elements */
  NameAndValue *names_and_vals;
  NameAndValue *get_names_and_vals();

public:
  ha_cassandra(handlerton *hton, TABLE_SHARE *table_arg);
  ~ha_cassandra()
  {
    delete se;
  }

  /** @brief
    The name that will be used for display purposes.
  */
  const char *table_type() const { return "CASSANDRA"; }

  /** @brief
    The name of the index type that will be used for display.
    Don't implement this method unless you really have indexes.
  */
  const char *index_type(uint inx) { return "HASH"; }

  /** @brief
    The file extensions.
  */
  const char **bas_ext() const;

  /** @brief
    This is a list of flags that indicate what functionality the storage engine
    implements. The current table flags are documented in handler.h
  */
  ulonglong table_flags() const
  {
    /*
      We are saying that this engine is just statement capable to have
      an engine that can only handle statement-based logging. This is
      used in testing.
    */
    return HA_BINLOG_STMT_CAPABLE;
  }

  /** @brief
    This is a bitmap of flags that indicates how the storage engine
    implements indexes. The current index flags are documented in
    handler.h. If you do not implement indexes, just return zero here.

    @details
    part is the key part to check. First key part is 0.
    If all_parts is set, MySQL wants to know the flags for the combined
    index, up to and including 'part'.
  */
  ulong index_flags(uint inx, uint part, bool all_parts) const
  {
    return 0;
  }

  /** @brief
    unireg.cc will call max_supported_record_length(), max_supported_keys(),
    max_supported_key_parts(), uint max_supported_key_length()
    to make sure that the storage engine can handle the data it is about to
    send. Return *real* limits of your storage engine here; MySQL will do
    min(your_limits, MySQL_limits) automatically.
  */
  uint max_supported_record_length() const { return HA_MAX_REC_LENGTH; }

  /* Support only one Primary Key, for now */
  uint max_supported_keys() const { return 1; }
  uint max_supported_key_parts() const { return 1; }

  /** @brief
    unireg.cc will call this to make sure that the storage engine can handle
    the data it is about to send. Return *real* limits of your storage engine
    here; MySQL will do min(your_limits, MySQL_limits) automatically.

    @details
    There is no need to implement ..._key_... methods if your engine doesn't
    support indexes.
  */
  uint max_supported_key_length() const { return 16*1024; /* just to return something*/ }

  /* At the moment, we're ok with default handler::index_init() implementation. */
  int index_read_map(uchar * buf, const uchar * key,
                     key_part_map keypart_map,
                     enum ha_rkey_function find_flag);

  /** @brief
    Called in test_quick_select to determine if indexes should be used.
  */
  virtual double scan_time() { return (double) (stats.records+stats.deleted) / 20.0+10; }

  /** @brief
    This method will never be called if you do not implement indexes.
  */
  virtual double read_time(uint, uint, ha_rows rows)
  { return (double) rows /  20.0+1; }

  /*
    Everything below are methods that we implement in ha_cassandra.cc.
    Most of these methods are not obligatory, skip them and
    MySQL will treat them as not implemented
  */
  /** @brief
    Implemented in ha_cassandra.cc; it's a required method.
  */
  int open(const char *name, int mode, uint test_if_locked);    // required

  /** @brief
    Implemented in ha_cassandra.cc; it's a required method.
  */
  int close(void);                                              // required

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int write_row(uchar *buf);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int update_row(const uchar *old_data, uchar *new_data);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int delete_row(const uchar *buf);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int index_next(uchar *buf);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int index_prev(uchar *buf);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int index_first(uchar *buf);

  /** @brief
    Implemented in ha_cassandra.cc. It's not an obligatory method;
    skip it and MySQL will treat it as not implemented.
  */
  int index_last(uchar *buf);

  /** @brief
    Unlike index_init(), rnd_init() can be called two consecutive times
    without rnd_end() in between (it only makes sense if scan=1). In this
    case, the second call should prepare for the new table scan (e.g if
    rnd_init() allocates the cursor, the second call should position the
    cursor to the start of the table; no need to deallocate and allocate
    it again. This is a required method.
  */
  int rnd_init(bool scan);                                      //required
  int rnd_end();
  int rnd_next(uchar *buf);                                     ///< required
  int rnd_pos(uchar *buf, uchar *pos);                          ///< required
  void position(const uchar *record);                           ///< required
  int info(uint);                                               ///< required
  int extra(enum ha_extra_function operation);
  int external_lock(THD *thd, int lock_type);                   ///< required
  int delete_all_rows(void);
  ha_rows records_in_range(uint inx, key_range *min_key,
                           key_range *max_key);
  int delete_table(const char *from);
  int create(const char *name, TABLE *form,
             HA_CREATE_INFO *create_info);                      ///< required
  bool check_if_incompatible_data(HA_CREATE_INFO *info,
                                  uint table_changes);

  THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
                             enum thr_lock_type lock_type);     ///< required
};
Loading…
Cancel
Save