installazione:

sudo pecl install mongo
#nel PHP.ini aggiungi
extension=mongo.so

configurazione in /etc/mongodb.conf; il db è in /var/lib/mongodb

$ sudo status mongodb
$ sudo stop mongodb
$ sudo start mongodb
$ sudo restart mongodb
 
sudo service mongodb stop
$ mongo -u taz -p anadminpassword --host localhost
MongoDB shell version: 1.5.4-pre-
connecting to: test
> db.auth("taz", "anadminpassword")
 
jmongobrowser localhost

load data:

mongoimport --db sales --collection customers --type csv --headerline --file customers.csv

MySQL can export data to CSV:

SELECT CustomerId,email,name,surname
FROM customers
INTO OUTFILE 'customers.csv'
FIELDS TERMINATED BY ',' ENCLOSED BY '"' LINES TERMINATED BY '\n'

auth

use test
> db.addUser("taz", "anadminpassword")

Use Cases

is Mongo faster than Mysql? MongoDB is not magically faster. If you store the same data, organised in basically the same fashion, and access it exactly the same way, then you really shouldn't expect your results to be wildly different. After all, MySQL and MongoDB are both open source, so if Mongo had some magically better IO code in it, then the MySQL team could just incorporate it into their codebase.

People are seeing real-world MongoDB performance gains largely because MongoDB allows you to query in a different manner that is more sensible to your workload.

For example, consider a design that persisted a lot of information about a complicated entity in a normalised fashion. This could easily use dozens of tables in MySQL (or any relational db) to store the data in normal form, with many indexes needed to ensure relational integrity between tables.

Now consider the same design with a document store. If all of those related tables are subordinate to the main table (and they often are), then you might be able to model the data such that the entire entity is stored in a single document. In MongoDB you can store this as a single document, in a single collection. This is where MongoDB starts enabling superior performance.

Mongo + Go

esempio di uso:

package main
import (
    "gopkg.in/mgo.v2"
    "gopkg.in/mgo.v2/bson"
    "time"
    "fmt"
    "log"
)
// Person is the document shape stored in the "people" collection.
// mgo lowercases Go field names when marshalling, so Name, Phone and
// TimeStamp are persisted as "name", "phone" and "timestamp" (matching
// the keys used by the query helpers below).
type Person struct {
	// ID maps to the document's _id; omitempty lets the server
	// generate an ObjectId on insert when the field is left unset.
	ID        bson.ObjectId `bson:"_id,omitempty"`
	Name      string
	Phone     string
	TimeStamp time.Time
}
// main connects to a local mongod, optionally drops the "test" database,
// ensures a unique index on _id for the "people" collection, runs a
// findAll query and logs the total elapsed time.
func main() {
	start := time.Now()

	// Dial returns a session, not a database handle.
	session, e := mgo.Dial("localhost")
	err(e)
	defer session.Close()

	// Set true before inserting fresh data. Set it false again for
	// insert, select, update.
	const isDrop = false

	session.SetMode(mgo.Monotonic, true)

	if isDrop {
		err(session.DB("test").DropDatabase())
	}

	people := session.DB("test").C("people")

	idx := mgo.Index{
		Key:        []string{"_id"},
		Unique:     true,
		DropDups:   true,
		Background: true,
		Sparse:     true,
	}
	err(people.EnsureIndex(idx))

	findAll(people)

	log.Printf("\t%s\n", time.Since(start))
}
// insert writes n Person documents into c, each stamped with its own
// creation time. Panics (via err) on the first insert failure.
func insert(c *mgo.Collection, n int) {
	for count := 0; count < n; count++ {
		now := time.Now()
		err(c.Insert(&Person{Name: "Hemraj", Phone: "9175290756", TimeStamp: now}))
	}
}
// findOne fetches a single Person named "Hemraj" and prints it. The
// projection {"phone": 0} excludes the phone field, so r.Phone is left
// at its zero value.
func findOne(c *mgo.Collection) {
	// Decode into a value and pass its address once: the original
	// declared r as *Person and then passed &r (a **Person), which mgo
	// tolerates but is unidiomatic.
	var r Person
	e := c.Find(bson.M{"name": "Hemraj"}).Select(bson.M{"phone": 0}).One(&r)
	err(e)
	// Label fixed: the old "phone: " prefix was misleading because the
	// phone field is explicitly excluded by the projection above.
	fmt.Println("person: ", r)
}
// findAll prints every Person named "Hemraj", sorted by _id descending
// (newest ObjectId first).
func findAll(c *mgo.Collection) {
	var people []Person
	query := c.Find(bson.M{"name": "Hemraj"}).Sort("-_id")
	err(query.All(&people))
	fmt.Println("Results: ", people)
}
// update sets phone to "10" and refreshes the timestamp on the first
// document named "Hemraj".
func update(c *mgo.Collection) {
	selector := bson.M{"name": "Hemraj"}
	patch := bson.M{"$set": bson.M{"phone": "10", "timestamp": time.Now()}}
	err(c.Update(selector, patch))
}
// err panics when e is non-nil and is a no-op otherwise, letting call
// sites collapse error handling into a single line. Note: the name
// shadows the conventional `err` variable at call sites; kept for
// compatibility with existing callers.
func err(e error) {
	if e == nil {
		return
	}
	panic(e)
}