Advanced Database Technology Lab Record-1
AIM:
To execute the queries to perform CRUD operations, Indexing, Sharding, Deployment in MongoDB.
PROCEDURE:
QUERIES:
CRUD Queries:
1
//Every row/document can be different than other
> db.emp.insert({name:'Amit',rno:2})
> db.emp.insert({rno:3, email_id:'[email protected]'})
> db.emp.find()
{ "_id" : ObjectId("5d7d3daf315728b4998f522e"), "rno" : 1, "name" : "Bhavana" }
{ "_id" : ObjectId("5d7d3f28315728b4998f522f"), "name" : "Amit", "rno" : 2 }
{ "_id" : ObjectId("5d7d3f56315728b4998f5230"), "rno" : 3, "email_id" : "[email protected]" }
{ "_id" : 1, "rno" : 4, "name" : "Akash" }
// trying to insert data with duplicate _id, it will not accept as _id is primary key field
> db.emp.insert({_id:1,rno:5,name:"Reena"})
E11000 duplicate key error index: db1.emp.$_id_ dup key: { : 1.0 }
> db.emp.find()
{ "_id" : ObjectId("5d7d3daf315728b4998f522e"), "rno" : 1, "name" : "Bhavana" }
{ "_id" : ObjectId("5d7d3f28315728b4998f522f"), "name" : "Amit", "rno" : 2 }
{ "_id" : ObjectId("5d7d3f56315728b4998f5230"), "rno" : 3, "email_id" : "[email protected]" }
{ "_id" : 1, "rno" : 4, "name" : "Akash" }
{ "_id" : 2, "rno" : 5, "name" : "Reena" }
{ "_id" : ObjectId("5d7d4244315728b4998f5231"), "rno" : 7, "name" : "a" }
{ "_id" : ObjectId("5d7d4244315728b4998f5232"), "rno" : 8, "name" : "b" }
{ "_id" : ObjectId("5d7d4244315728b4998f5233"), "rno" : 8, "name" : "c" }
> db.emp.find()
{ "_id" : ObjectId("5d7d3daf315728b4998f522e"), "rno" : 1, "name" : "Bhavana" }
{ "_id" : ObjectId("5d7d3f28315728b4998f522f"), "name" : "Amit", "rno" : 2 }
{ "_id" : ObjectId("5d7d3f56315728b4998f5230"), "rno" : 3, "email_id" : "[email protected]" }
{ "_id" : 1, "rno" : 4, "name" : "Akash" }
{ "_id" : 2, "rno" : 5, "name" : "Reena" }
{ "_id" : ObjectId("5d7d4244315728b4998f5231"), "rno" : 7, "name" : "a" }
2
{ "_id" : ObjectId("5d7d4244315728b4998f5232"), "rno" : 8, "name" : "b" }
{ "_id" : ObjectId("5d7d4244315728b4998f5233"), "rno" : 8, "name" : "c" }
{ "_id" : ObjectId("5d7d433a315728b4998f5234"), "rno" : 10, "name" : "Ankit", "hobbies" :
[ "singing", "cricket", "swimming" ], "age" : 21 }
> db.emp.find()
{ "_id" : ObjectId("5d7d3daf315728b4998f522e"), "rno" : 1, "name" : "Bhavana" }
{ "_id" : ObjectId("5d7d3f28315728b4998f522f"), "name" : "Amit", "rno" : 2 }
{ "_id" : ObjectId("5d7d3f56315728b4998f5230"), "rno" : 3, "email_id" : "[email protected]" }
{ "_id" : 1, "rno" : 4, "name" : "Akash" }
{ "_id" : 2, "rno" : 5, "name" : "Reena" }
{ "_id" : ObjectId("5d7d4244315728b4998f5231"), "rno" : 7, "name" : "a" }
{ "_id" : ObjectId("5d7d4244315728b4998f5232"), "rno" : 8, "name" : "b" }
{ "_id" : ObjectId("5d7d4244315728b4998f5233"), "rno" : 8, "name" : "c" }
{ "_id" : ObjectId("5d7d433a315728b4998f5234"), "rno" : 10, "name" : "Ankit", "hobbies" :
[ "singing", "cricket", "swimming" ], "age" : 21 }
{ "_id" : ObjectId("5d7d4462315728b4998f5235"), "rno" : 11, "Name" : { "Fname" : "Bhavana",
"Mname" : "Amit", "Lname" : "Khivsara" } }
{ "_id" : ObjectId("5d7d4574315728b4998f5236"), "rno" : 12, "Name" : "Janvi", "Address" :
{ "Flat" : 501, "Building" : "Sai Appart", "area" : "Tidke colony", "city" : "Nashik", "state" : "MH",
"pin" : 423101 }, "age" : 22 }
{ "_id" : ObjectId("5d7d465d315728b4998f5237"), "rno" : 15, "name" : "Ravina", "dob" :
ISODate("2019-09-14T00:00:00Z") }
>
5
//Find command with condition with giving name field only to show
> db.stud.find({rno:5},{name:1})
{ "_id" : ObjectId("5d83af5aa44331f62bcd836d"), "name" : "Jivan" }
//Find command with condition with giving name field only to show and _id to hide
> db.stud.find({rno:5},{name:1,_id:0})
{ "name" : "Jivan" }
//Display rno & name whose rno is greater than 2. Show output in descending order by
rno
> db.stud.find({rno:{$gt:2}},{_id:0}).sort({rno:-1})
{ "rno" : 5, "name" : "Jivan" }
{ "rno" : 4, "name" : "Reena" }
{ "rno" : 3, "name" : "Sagar" }
// Limit use to show only some records from starting- following command shows only
first 2 records from collection
> db.stud.find().limit(2)
{ "_id" : ObjectId("5d83af5aa44331f62bcd8369"), "rno" : 1, "name" : "Ashiti" }
7
{ "_id" : ObjectId("5d83af5aa44331f62bcd836a"), "rno" : 2, "name" : "Savita" }
// Skip use to show all records after skipping some records- following command shows
all records after first 2 records from collection
> db.stud.find().skip(2)
{ "_id" : ObjectId("5d83af5aa44331f62bcd836b"), "rno" : 3, "name" : "Sagar" }
{ "_id" : ObjectId("5d83af5aa44331f62bcd836c"), "rno" : 4, "name" : "Reena" }
{ "_id" : ObjectId("5d83af5aa44331f62bcd836d"), "rno" : 5, "name" : "Jivan" }
{ "_id" : ObjectId("5d83b8d9a44331f62bcd836e"), "rno" : 5, "name" : "Radhika" }
{ "_id" : ObjectId("5d83b8eba44331f62bcd836f"), "rno" : 3, "name" : "Manioj" }
> db.stud.find({rno:{$gt:2}}).count()
5
//$unset will remove the column rno from document matching the given condition
> db.stud.update({rno:1},{$unset:{rno:1}})
//upsert use to update document if condition found otherwise insert document with
updates values.
> db.stud.update({rno:50},{$set:{rno:55}},{upsert:true})
Indexing:
//To create index on rno in ascending order(1)- //Single field Index example
>db.stud.createIndex({rno:1})
//To show the list of Index , v is version, key is on which field you created index
//ns-name space(database name.collection name), name- Name of index given by mongodb
>db.stud.getIndexes()
[
{
9
"v" : 1,
"key" : {
"_id" : 1
},
"ns" : "db1.stud",
"name" : "_id_"
},
{
"v" : 1,
"key" : {
"rno" : 1
},
"ns" : "db1.stud",
"name" : "rno_1"
}
]
>db.stud.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"ns" : "db1.stud",
"name" : "_id_"
},
{
"v" : 1,
"key" : {
"rno" : 1
},
"ns" : "db1.stud",
"name" : "rno_1"
},
{
"v" : 1,
"key" : {
"rno" : -1,
"name" : 1
},
"ns" : "db1.stud",
"name" : "rno_-1_name_1"
}
]
RESULT:
The queries to perform CRUD, Indexing, Sharding and Deployment were executed successfully.
11
EX No: NOSQL EXERCISES
Cassandra: Table Operations, CRUD Operations, CQL Types.
Date :
AIM:
To execute the queries to perform Table Operations, CRUD operations, CQL Types in Cassandra.
PROCEDURE:
QUERIES:
1. Create Keyspace:
2. Table Operations
cqlsh:test> CREATE TABLE emp( emp_id int PRIMARY KEY, emp_name text, emp_city text,
emp_sal varint, emp_phone varint);
3. CRUD Operations
1. Create data
cqlsh:test> INSERT INTO emp (emp_id, emp_name, emp_city,
emp_phone, emp_sal) VALUES (1,'ram', 'Hyderabad', 9848022338, 50000);
12
2. To read all data from table
cqlsh:test> SELECT * FROM emp;
emp_name | emp_sal
----------+---------
ram | 50000
robin | 50000
rajeev | 30000
rahman | 50000
(4 rows)
4. To create index
cqlsh:test> CREATE INDEX sal ON emp(emp_sal);
cqlsh:test> SELECT * FROM emp WHERE emp_sal=50000;
5. To drop index
cqlsh:test> drop index sal;
4. CQL Types
CQL provides a rich set of built-in data types, including collection types. Along with these data
types, users can also create their own custom data types. The following table provides a list of
built-in data types available in CQL.
Data Type Constants Description
14
Collection Types
Cassandra Query Language also provides a collection data types. The following table provides a
list of Collections available in CQL.
Collection Description
User-defined datatypes
Cqlsh provides users a facility of creating their own data types. Given below are the commands
used while dealing with user defined data types.
CREATE TYPE − Creates a user-defined data type.
ALTER TYPE − Modifies a user-defined data type.
DROP TYPE − Drops a user-defined data type.
DESCRIBE TYPE − Describes a user-defined data type.
DESCRIBE TYPES − Describes user-defined data types.
3. Create a table for storing user data in columns of type fullname and address. Use
the frozen keyword in the definition of the user-defined type column.
CREATE TABLE mykeyspace.users (
id uuid PRIMARY KEY,
name frozen <fullname>,
direct_reports set<frozen <fullname>>, // a collection set
addresses map<text, frozen <address>> // a collection map);
15
6. Retrieve the full name of a user.
SELECT name FROM mykeyspace.users WHERE id=62c36092-82a1-3a00-93d1-
46196ee77204;
name
-------------------------------------------------
{firstname: 'Marie-Claude', lastname: 'Josset'}
7. Using dot notation, you can retrieve a component of the user-defined type
column.
SELECT name.lastname FROM mykeyspace.users WHERE id=62c36092-82a1-3a00-93d1-
46196ee77204;
name.lastname
---------------
Josset
8. To create index
CREATE INDEX on mykeyspace.users (name);
SELECT id FROM mykeyspace.users WHERE name = {firstname: 'Marie-Claude',
lastname: 'Josset'};
id
--------------------------------------
62c36092-82a1-3a00-93d1-46196ee77204
direct_reports
-----------------------------------------------------------------------------------
{{firstname: 'Jeiranan', lastname: 'Thongnopneua'}}
{{firstname: 'Naoko', lastname: 'Murai'}, {firstname: 'Sompom', lastname: 'Peh'}}
RESULT:
The queries to create keyspace, create table and CQL types have been executed successfully.
16
EX No: NOSQL EXERCISES
HIVE: Data types, Database Operations, Partitioning – HiveQL
Date :
AIM:
To create a database, perform database operations and partitioning, and execute Hive queries in
HIVE.
PROCEDURE:
Step 1: Start the hadoop and run it in behind.
Step 2: Start the derby server and let it run in background.
Step 3: Start yarn/ MapReduce
Step 4: Start network server (use 0.0.0.0 as host address)
Step 5: Start hive. Create the database .Perform basic queries to perform database operations.
Step 5: Perform basic queries to perform partitioning. Perform basic queries for HiveQL .
Column Types
Column type are used as column data types of Hive. They are as follows:
Integral Types
Integer type data can be specified using integral data types, INT. When the data range exceeds the
range of INT, you need to use BIGINT and if the data range is smaller than the INT, you use
SMALLINT. TINYINT is smaller than SMALLINT.
String Types
String type data types can be specified using single quotes (' ') or double quotes (" "). It contains two
data types: VARCHAR and CHAR. Hive follows C-style escape characters.
Timestamp
It supports traditional UNIX timestamp with optional nanosecond precision. It supports
java.sql.Timestamp format “YYYY-MM-DD HH:MM:SS.fffffffff” and format “yyyy-mm-dd
hh:mm:ss.ffffffffff”.
17
Dates
DATE values are described in year/month/day format in the form {{YYYY-MM-DD}}.
Decimals
The DECIMAL type in Hive is as same as Big Decimal format of Java. It is used for representing
immutable arbitrary precision. The syntax and example is as follows:
Union Types
Union is a collection of heterogeneous data types. You can create an instance using create union. The
syntax and example is as follows:
Literals
The following literals are used in Hive:
Decimal Type
Decimal type data is nothing but floating point value with higher range than DOUBLE data type. The
range of decimal type is approximately -10^-308 to 10^308.
Null Value
Missing values are represented by the special value NULL.
Complex Types
The Hive complex data types are as follows:
Arrays
Arrays in Hive are used the same way they are used in Java.
Syntax: ARRAY<data_type>
Maps
Maps in Hive are similar to Java Maps.
Syntax: MAP<primitive_type, data_type>
Structs
Structs in Hive are similar to using complex data types with comments.
Syntax: STRUCT<col_name : data_type [COMMENT col_comment], ...>
To set a database
hive> USE financials;
To delete a database
hive> DROP DATABASE IF EXISTS financials;
To create table
CREATE TABLE IF NOT EXISTS mydb.employees (
name STRING COMMENT 'Employee name',
salary FLOAT COMMENT 'Employee salary',
subordinates ARRAY<STRING> COMMENT 'Names of subordinates',
19
deductions MAP<STRING, FLOAT>
COMMENT 'Keys are deductions names, values are percentages',
address STRUCT<street:STRING, city:STRING, state:STRING, zip:INT>
COMMENT 'Home address')
COMMENT 'Description of the table'
TBLPROPERTIES ('creator'='me', 'created_at'='2021-01-02 10:00:00', ...)
LOCATION '/user/hive/warehouse/mydb.db/employees';
To copy a schema
CREATE TABLE IF NOT EXISTS mydb.employees2
LIKE mydb.employees;
To delete a table
DROP TABLE IF EXISTS employees;
RESULT:
The database, its operations, partitioning and Hive queries have been executed successfully in HIVE.
21
EX No: NOSQL EXERCISES
OrientDB Graph database – OrientDB Features
Date :
AIM:
To study about the OrientDB Graph database and its features.
THEORY:
Introduction
OrientDB is an Open Source NoSQL Database Management System, which contains the features of
traditional DBMS along with the new features of both Document and Graph DBMS. It is written in
Java and is amazingly fast. It can store 220,000 records per second on commodity hardware.
OrientDB, is one of the best open-source, multi-model, next generation NoSQL product.
OrientDB is an Open Source NoSQL Database Management System. A NoSQL database provides a
mechanism for storing and retrieving non-relational data, that is, data other
than tabular data such as document data or graph data. NoSQL databases are increasingly used in Big
Data and real-time web applications. NoSQL systems are also sometimes called "Not Only SQL" to
emphasize that they may support SQL-like query languages.
OrientDB also belongs to the NoSQL family. OrientDB is a second generation Distributed Graph
Database with the flexibility of Documents in one product with an open source of Apache 2 license.
Query language Has its own language based on JSON. Query language is built on SQL.
Uses the B-Tree algorithm for all Supports three different indexing
Indexes indexes. algorithms so that the user can
achieve best performance.
OrientDB is the first Multi-Model open source NoSQL DBMS that brings together the power of
graphs and flexibility of documents into a scalable high-performance operational database.
The main feature of OrientDB is to support multi-model objects, i.e. it supports different models like
Document, Graph, Key/Value and Real Object. It contains a separate API to support all these four
models.
22
Document Model
The terminology Document model belongs to NoSQL database. It means the data is stored in the
Documents and the group of Documents are called as Collection. Technically, document means a set
of key/value pairs or also referred to as fields or properties.
OrientDB uses the concepts such as classes, clusters, and link for storing, grouping, and analyzing
the documents.
The following table illustrates the comparison between relational model, document model, and
OrientDB document model
Graph Model
A graph data structure is a data model that can store data in the form of Vertices (Nodes)
interconnected by Edges (Arcs). The idea of OrientDB graph database came from property graph.
The vertex and edge are the main artifacts of the Graph model. They contain the properties, which
can make these appear similar to documents.
The following table shows a comparison between graph model, relational data model, and OrientDB
graph model.
Table Vertex and Edge Class Class that extends "V" (for Vertex) and "E" (for
Edges)
The following table illustrates the comparison between relational model, key/value model, and
OrientDB key/value model.
23
Relational Model Key/Value Model OrientDB Key/Value Model
The following table illustrates the comparison between relational model, Object model, and
OrientDB Object model.
Record ID
When OrientDB generates a record, the database server automatically assigns a unique identifier to the
record, called RecordID (RID). The RID looks like #<cluster>:<position>. <cluster> means cluster
identification number and the <position> means absolute position of the record in the cluster.
Documents
The Document is the most flexible record type available in OrientDB. Documents are softly typed
and are defined by schema classes with defined constraint, but you can also insert the document
without any schema, i.e. it supports schema-less mode too.
Documents can be easily handled by export and import in JSON format. For example, take a look at
the following JSON sample document. It defines the document details.
{
"id" : "1201",
"name" : "Jay",
24
"job" : "Developer",
"creations" : [
{
"name" : "Amiga",
"company" : "Commodore Inc."
},
{
"name" : "Amiga 500",
"company" : "Commodore Inc."
}
]
}
RecordBytes
Record Type is the same as BLOB type in RDBMS. OrientDB can load and store document Record
type along with binary data.
Vertex
OrientDB database is not only a Document database but also a Graph database. The new concepts
such as Vertex and Edge are used to store the data in the form of graph. In graph databases, the most
basic unit of data is node, which in OrientDB is called a vertex. The Vertex stores information for the
database.
Edge
There is a separate record type called the Edge that connects one vertex to another. Edges are
bidirectional and can only connect two vertices. There are two types of edges in OrientDB, one is
regular and another one lightweight.
Class
The class is a type of data model and the concept drawn from the Object-oriented programming
paradigm. Based on the traditional document database model, data is stored in the form of collection,
while in the Relational database model data is stored in tables. OrientDB follows the Document API
along with the OOP paradigm. As a concept, the class in OrientDB has the closest relationship with the
table in relational databases, but (unlike tables) classes can be schema-less, schema-full or mixed.
Classes can inherit from other classes, creating trees of classes. Each class has its own cluster or
clusters, (created by default, if none are defined).
Cluster
Cluster is an important concept which is used to store records, documents, or vertices. In simple
words, Cluster is a place where a group of records are stored. By default, OrientDB will create one
cluster per class. All the records of a class are stored in the same cluster having the same name as the
class. You can create up to 32,767(2^15-1) clusters in a database.
The CREATE class is a command used to create a cluster with specific name. Once the cluster is
created you can use the cluster to save records by specifying the name during the creation of any data
model.
Relationships
OrientDB supports two kinds of relationships: referenced and embedded. Referenced
relationships means it stores direct link to the target objects of the relationships. Embedded
relationships means it stores the relationship within the record that embeds it. This relationship is
stronger than the reference relationship.
25
Database
The database is an interface to access the real storage. It understands high-level concepts such as
queries, schemas, metadata, indices, and so on. OrientDB also provides multiple database types. For
more information on these types, see Database Types.
RESULT:
The OrientDB Graph database concepts are examined along with its features successfully.
26
EX No: MySQL Database Creation, Table Creation, Query.
Date :
AIM:
To execute the basic queries like database creation, table creation and perform basic queries on tables
in MYSQL.
PROCEDURE:
1. Create database.
2. Create the needed Tables
A. Consider the following schema for a LibraryDatabase:
1. BOOK (Book_id, Title, Publisher_Name, Pub_Year)
2. BOOK_AUTHORS (Book_id, Author_Name)
3. PUBLISHER (Name, Address, Phone)
4. BOOK_COPIES(Book_id, Branch_id, No-of_Copies)
5. BOOK_LENDING (Book_id, Branch_id, Card_No, Date_Out, Due_Date)
6. LIBRARY_BRANCH (Branch_id, Branch_Name, Address)
3. Insert needed number of values (tuples) into the tables
4. Perform the various queries given below:
1. Retrieve details of all books in the library – id, title, name of publisher, authors, number
of copies in each branch, etc.
2. Get the particulars of borrowers who have borrowed more than 3 books, but from Jan
2017 to Jun 2017
3. Delete a book in BOOK table. Update the contents of other tables to reflect this data
manipulation operation.
4. Partition the BOOK table based on year of publication. Demonstrate its working with a
simple query.
5. Create a view of all books and its number of copies that are currently available in the
Library.
QUERIES - SYNTAX:
1. Database Creation
USE LIBRARYDATABASE;
2. Table Creation
CREATE TABLE PUBLISHER (NAME VARCHAR (20) PRIMARY KEY, PHONE BIGINT,
ADDRESS VARCHAR (20));
CREATE TABLE BOOK (BOOK_ID INTEGER PRIMARY KEY, TITLE VARCHAR (20),
PUBLISHER_NAME VARCHAR(20), PUB_YEAR VARCHAR (20), FOREIGN KEY
(PUBLISHER_NAME) REFERENCES PUBLISHER (NAME) ON DELETE CASCADE);
27
CREATE TABLE LIBRARY_BRANCH (BRANCH_ID INTEGER PRIMARY KEY,
BRANCH_NAME VARCHAR (50), ADDRESS VARCHAR (50));
CREATE TABLE BOOK_COPIES (NO_OF_COPIES INTEGER, BOOK_ID INTEGER,
BRANCH_ID INTEGER, FOREIGN KEY (BOOK_ID) REFERENCES BOOK (BOOK_ID) ON
DELETE CASCADE, FOREIGN KEY(BRANCH_ID) REFERENCES LIBRARY_BRANCH
(BRANCH_ID) ON DELETE CASCADE, PRIMARY KEY (BOOK_ID, BRANCH_ID));
4. Basic Queries
1. Query to Retrieve details of all books in the library – id, title, name of publisher, authors,
number of copies in each branch, etc.
2. Query to Get the particulars of borrowers who have borrowed more than 3 books, but from
Jan 2017 to Jun2017.
29
3. Query to Delete a book in BOOK table. Update the contents of other tables to reflect this data
manipulation operation.
4. Query to Partition the BOOK table based on year of publication. Demonstrate its working
with a simple query.
5. Query to Create a view of all books and its number of copies that are currently available in
the Library.
RESULT :
The queries to create database, create table and query table have been executed successfully.
30
EX No: MySQL Replication – Distributed Databases
Date :
AIM:
To implement Replication in distributed database using MYSQL.
THEORY :
MYSQL - Replication
MySQL supports replication capabilities that allow the databases on one server to be made available
on another server. Replication is used for many purposes. For example, by replicating your
databases, you have multiple copies available in case a server crashes or goes offline. Clients can use
a different server if the one that they normally use becomes unavailable. Replication also can be used
to distribute client load. Rather than having a single server to which all clients connect, you can set
up multiple servers that each handle a fraction of the client load.
A replication slave is set up initially by transferring an exact copy of the to-be-replicated databases
from the master server to the slave server. Thereafter, each replicated database is kept synchronized
to the original database. When the master server makes modifications to its databases, it sends those
changes to each slave server, which makes the changes to its copy of the replicated databases.
PROCEDURE:
Setting Up Replication
To set up replication, each slave requires the following:
o A backup copy of the master's databases. This is the replication "baseline" that sets the slave
to a known initial state of the master.
o The filename and position within the master's binary log that corresponds to the time of the
backup. The values are called the "replication coordinates." They are needed so that the slave
can tell the master that it wants all updates made from that point on.
o An account on the master server that the slave can use for connecting to the master and
requesting updates. The account must have the global REPLICATION SLAVE privilege. For
example, you can set up an account for a slave by issuing these statements on the master
server, where slave_user and slave_pass are the username and password for the account,
and slave_host is the host from which the slave server will connect:
Also, you must assign a unique ID value to each server that will participate in your replication setup.
ID values are positive integers in the range from 1 to 2^32 - 1. The easiest way to assign these ID
values is by placing a server-id option in each server's option
file:
[mysqld] server-id=id_value
31
It's common, though not required, to use an ID of 1 for the master server and values greater than 1
for the slaves. The following procedure describes the general process for setting up replication.
1. Ensure that binary logging is enabled on the master server. If it is not, stop the server, enable
logging, and restart the server.
2. On the master server, make a backup of all databases to be replicated. One way to do this is by
using mysqldump:
Assuming that binary logging is enabled, the --master-data=2 option causes the dump file to
include a comment containing a CHANGE MASTER statement that indicates the replication
coordinates as of the time of the backup. These coordinates can be used later when you tell the
slave where to begin replicating in the master's binary log.
3. Copy the dump file to the replication slave host and load it into the MySQL server on that
machine:
4. Tell the slave what master to connect to and the position in the master's binary log at which to
begin replicating. To do this, connect to the slave server and issue a CHANGE
MASTER statement:
The hostname is the host where the master server is running. The username and password are
those for the slave account that you set up on the master. The log file and position are the
replication coordinates in the master's binary log. (You can get these from the CHANGE
MASTER statement near the beginning of the dump file.)
After you perform the preceding procedure, issue a START SLAVE statement. The slave should
connect to the master and begin replicating updates that the master sends to it. The slave also creates
a master.info file in its data directory and records the values from the CHANGE MASTER statement
in the file. As the slave reads updates from the master, it changes the replication coordinates in
the master.info file accordingly. Also, when the slave restarts in the future, it looks in this file to
determine which master to use.
By default, the master server logs updates for all databases, and the slave server replicates all updates
that it receives from the master. For more fine-grained control, it's possible to tell a master which
databases to log updates for, and to tell a slave which of those updates that it receives from the
master to apply. You can either name databases to be replicated (in which case those not named are
ignored), or you can name databases to ignore (in which case those not named are replicated). The
master host options are --binlog-do-db and --binlog-ignore-db. The slave host options are --replicate-
do-db and --replicate-ignore-db.
The following example illustrates how this works, using the options that enable replication for
specific databases. Suppose that a master server has three databases named a, b, and c. You can elect
to replicate only databases a and b when you start the master server by placing these options in an
option file read by that server:
Enabling binary logging only for certain databases has an unfortunate side effect: Data recovery
operations require both your backup files and your binary logs, so for any database not logged in the
binary log, full recovery cannot be performed. For this reason, you might prefer to have the master
log changes for all databases to the binary log, and instead filter updates on the slave side.
A slave that takes no filtering action will replicate all events that it receives. If a slave should
replicate events only for certain databases, such as databases a and c, you can start it with these lines
in an option file:
RESULT:
The MYSQL replication was executed successfully.
33
EX No: Spatial data storage and retrieval in MySQL
Date :
AIM:
To create a spatial data storage and retrieve data in mysql.
PROCEDURE:
QUERIES:
Use the ALTER TABLE statement to add or drop a spatial column to or from an existing table
ALTER TABLE geom ADD pt POINT;
ALTER TABLE geom DROP pt;
SET @g = 'POLYGON((0 0,10 0,10 10,0 10,0 0),(5 5,7 5,7 7,5 7, 5 5))';
INSERT INTO geom VALUES (ST_GeomFromText(@g));
34
To use ST_GeomFromText() function to create geometry values. We can also use type-specific
functions:
SET @g = 'POLYGON((0 0,10 0,10 10,0 10,0 0),(5 5,7 5,7 7,5 7, 5 5))';
INSERT INTO geom VALUES (ST_PolygonFromText(@g));
Fetching spatial data in WKT format:The ST_AsText() function converts a geometry from
internal format to a WKT string.
SELECT ST_AsText(g) FROM geom;
Fetching spatial data in WKB format:The ST_AsBinary() function converts a geometry from
internal format to a BLOB containing the WKB value.
SELECT ST_AsBinary(g) FROM geom;
RESULT:
The spatial data storage creation and retrieval of data in MySQL has been executed
successfully.
35
EX No:
Temporal data storage and retrieval in MySQL
Date :
AIM:
To create a Temporal data storage and retrieval in MySQL.
PROCEDURE:
THEORY :
TEMPORAL DATATYPE
MySQL provides data types for storing different kinds of temporal information. In the following
descriptions, the terms YYYY, MM, DD, hh, mm, and ss stand for a year, month, day of month,
hour, minute, and second value, respectively.
The following table summarizes the storage requirements and ranges for the date and time data types.
To insert
mysql> INSERT INTO ts_test1 (data) VALUES ('original_value');
Query OK, 1 row affected (0.00 sec)
36
To update
mysql> UPDATE ts_test1 SET data='updated_value';
Query OK, 1 row affected (0.00 sec)
Rows matched: 1 Changed: 1 Warnings: 0
To retrieve
mysql> SELECT * FROM ts_test1;
+---------------------+---------------------+---------------+ | ts1 | ts2 | data |
+---------------------+---------------------+---------------+ | 2005-01-04 14:46:17 | 0000-00-00 00:00:00 |
updated_value | +---------------------+---------------------+---------------+ 1 row in set (0.00 sec)
mysql> INSERT INTO ts_test3 (data) VALUES ('original_value'); Query OK, 1 row affected (0.00
sec)
mysql> CREATE TABLE ts_test5 ( -> created TIMESTAMP DEFAULT 0, -> updated
TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, -> data CHAR(30) -> );
Query OK, 0 rows affected (0.01 sec)
mysql> INSERT INTO ts_test5 (created, data) -> VALUES (NULL, 'original_value');
Query OK, 1 row affected (0.00 sec)
RESULT:
The Temporal data storage and retrieval in MySQL is executed successfully.
39
EX No: Object storage and retrieval
Date :
AIM:
To create and execute Object data storage and retrieval.
PROCEDURE:
QUERY:
To create a table
CREATE TABLE Customer_objtab OF Customer_objtyp (CustNo PRIMARY KEY)
OBJECT ID PRIMARY KEY ;
40
CREATE TABLE Stock_objtab OF StockItem_objtyp (StockNo PRIMARY KEY) OBJECT ID
PRIMARY KEY ;
To alter table
ALTER TABLE PoLine_ntab
ADD (SCOPE FOR (Stock_ref) IS stock_objtab) ;
To insert
INSERT INTO Stock_objtab VALUES(1004, 6750.00, 2) ;
INSERT INTO Stock_objtab VALUES(1011, 4500.23, 2) ;
INSERT INTO Stock_objtab VALUES(1534, 2234.00, 2) ;
INSERT INTO Stock_objtab VALUES(1535, 3456.23, 2) ;
42
SELECT p.PONo
FROM PurchaseOrder_objtab p
ORDER BY VALUE(p) ;
SELECT AVG(L.DISCOUNT)
FROM PurchaseOrder_objtab po, TABLE (po.LineItemList_ntab) L ;
To delete
DELETE
FROM PurchaseOrder_objtab
WHERE PONo = 1001 ;
RESULT:
The queries for object data storage and retrieval were created and executed successfully.
43
EX No: XML Databases, XML table creation, XQuery FLWOR
expression
Date :
AIM:
To create and execute XML Databases , XML table creation, XQuery FLWOR expression.
PROCEDURE:
QUERIES:
To create table
CREATE TABLE mytable1 (key_column VARCHAR2(10) PRIMARY KEY, xml_column
XMLType);
Table created.
Table created.
To insert values:
INSERT INTO mytable2 VALUES (XMLType(bfilename('XMLDIR', 'purchaseOrder.xml'),
nls_charset_id('AL32UTF8')));
XMLQUERY('FOR$IIN/PURCHASEORDERWHERE$I/COSTCENTEREQ"A10"AND$I/
USEREQ"SMCCAIN"RET
--------------------------------------------------------------------------------
<A10po pono="SMCCAIN-20021009123336151PDT"></A10po>
<A10po pono="SMCCAIN-20021009123336341PDT"></A10po>
<A10po pono="SMCCAIN-20021009123337173PDT"></A10po>
<A10po pono="SMCCAIN-20021009123335681PDT"></A10po>
<A10po pono="SMCCAIN-20021009123335470PDT"></A10po>
<A10po pono="SMCCAIN-20021009123336972PDT"></A10po>
<A10po pono="SMCCAIN-20021009123336842PDT"></A10po>
<A10po pono="SMCCAIN-20021009123336512PDT"></A10po>
<A10po pono="SMCCAIN-2002100912333894PDT"></A10po>
44
<A10po pono="SMCCAIN-20021009123337403PDT"></A10po>
XML File:
<PurchaseOrder>
<Reference>SBELL-2002100912333601PDT</Reference>
<Actions>
<Action>
<User>SVOLLMAN</User>
</Action>
</Actions>
...
</PurchaseOrder>
<PurchaseOrder>
<Reference>ABEL-20021127121040897PST</Reference>
<Actions>
<Action>
<User>ZLOTKEY</User>
</Action>
<Action>
<User>KING</User>
</Action>
</Actions>
...
</PurchaseOrder>
RESULT:
The creation and execution of XML Databases , XML table creation, XQuery FLWOR expression
has been completed successfully.
45