In [2]:
import requests
import pandas as pd
import xml.etree.ElementTree as ET
In [ ]:
 

XML 1

In [132]:
xml_url = 'https://www.w3schools.com/xml/cd_catalog.xml'

# Read XML from URL...
xml_response = requests.get(xml_url, timeout=5)
my_xml_root = ET.fromstring(xml_response.text)


# Parse XML from str...
# my_xml_root = ET.fromstring(xml_data) # to read from file: ET.parse('sample.xml').getroot()


# # Parse XML from file...
# my_xml_root = ET.parse(xml_file).getroot()


my_xml_root
Out[132]:
<Element 'CATALOG' at 0x000001C0E03DE180>
In [133]:
my_xml_root1, my_xml_root2, my_xml_root3
Out[133]:
(<Element 'CATALOG' at 0x000001C0E04EA360>,
 <Element 'CATALOG' at 0x000001C0E03DBD10>,
 <Element 'CATALOG' at 0x000001C0E04E5B80>)
In [201]:
# The root element is only one in this case and it has no attributes as seen below...
my_xml_root.tag # 'CATALOG'
my_xml_root.attrib # empty dict {}


# The first child...
my_xml_root[0]

for x in my_xml_root[0]:
    print(x.tag, x.attrib, x.text)
# =========================================

    
# Now lets get all children as well as there text attributes
all_children = list(my_xml_root) # Old way is: my_xml_root.getchildren()

for child in all_children:
    print(child.find('TITLE').text)
    print(child.find('ARTIST').text)
    print(child.find('COUNTRY').text)
    print(child.find('COMPANY').text)
    print(child.find('PRICE').text)
    print(child.find('YEAR').text)
    
    break # only print the first record
# =========================================



# Now we can collect the records into list/dictionary to construct a friend dataframe structure
data_list = []
for child in all_children:
    data_dict = {}
    
    data_dict['Title'] = child.find('TITLE').text
    data_dict['Artist'] = child.find('ARTIST').text
    data_dict['Country'] = child.find('COUNTRY').text
    data_dict['Company'] = child.find('COMPANY').text
    data_dict['Price'] = child.find('PRICE').text
    data_dict['Year'] = child.find('YEAR').text
    
    data_list.append(data_dict)
# =========================================

    

# Generate the df...
data_list_df = pd.DataFrame(data_list)
data_list_df
TITLE {} Empire Burlesque
ARTIST {} Bob Dylan
COUNTRY {} USA
COMPANY {} Columbia
PRICE {} 10.90
YEAR {} 1985
Empire Burlesque
Bob Dylan
USA
Columbia
10.90
1985
Out[201]:
Title Artist Country Company Price Year
0 Empire Burlesque Bob Dylan USA Columbia 10.90 1985
1 Hide your heart Bonnie Tyler UK CBS Records 9.90 1988
2 Greatest Hits Dolly Parton USA RCA 9.90 1982
3 Still got the blues Gary Moore UK Virgin records 10.20 1990
4 Eros Eros Ramazzotti EU BMG 9.90 1997
5 One night only Bee Gees UK Polydor 10.90 1998
6 Sylvias Mother Dr.Hook UK CBS 8.10 1973
7 Maggie May Rod Stewart UK Pickwick 8.50 1990
8 Romanza Andrea Bocelli EU Polydor 10.80 1996
9 When a man loves a woman Percy Sledge USA Atlantic 8.70 1987
10 Black angel Savage Rose EU Mega 10.90 1995
11 1999 Grammy Nominees Many USA Grammy 10.20 1999
12 For the good times Kenny Rogers UK Mucik Master 8.70 1995
13 Big Willie style Will Smith USA Columbia 9.90 1997
14 Tupelo Honey Van Morrison UK Polydor 8.20 1971
15 Soulsville Jorn Hoel Norway WEA 7.90 1996
16 The very best of Cat Stevens UK Island 8.90 1990
17 Stop Sam Brown UK A and M 8.90 1988
18 Bridge of Spies T'Pau UK Siren 7.90 1987
19 Private Dancer Tina Turner UK Capitol 8.90 1983
20 Midt om natten Kim Larsen EU Medley 7.80 1983
21 Pavarotti Gala Concert Luciano Pavarotti UK DECCA 9.90 1991
22 The dock of the bay Otis Redding USA Stax Records 7.90 1968
23 Picture book Simply Red EU Elektra 7.20 1985
24 Red The Communards UK London 7.80 1987
25 Unchain my heart Joe Cocker USA EMI 8.20 1987
In [ ]:
 

XML 2

In [3]:
new_xml_url = 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/ciyn3'

# Read XML from URL...
xml_response = requests.get(new_xml_url, timeout=5)
my_xml_root = ET.fromstring(xml_response.text)
# ------------------------

my_xml_root.attrib
my_xml_root.tag

# Get all children...
all_children = list(my_xml_root)

# the items we wanted are in the fifth child...
all_children[4].tag
all_children[4].attrib

content_text = all_children[4].text
content_text_list = content_text.split(', ')

content_text_list
Out[3]:
'b: 495419K, c: 3D NETWORKS SDN. BHD., d: 100,291, e: TA-9-1 LEVEL 9, TOWER A PLAZA 33, f: NO 1, JALAN KEMAJUAN, g: SEKSYEN 13 SELANGOR, h: PETALING JAYA, i: 46200, j: [NULL], k: Selangor, l: 60379403868, m: 60379403888, n: [email protected], o: Services, p: 62010, q: COMPUTER INDUSTRY, r: 2000-07-11-00.00.00.000000, s: 20, t: 64, u: 64, v: M, w: Y'
In [250]:
import re

# Template: re.sub(regex_pattern, new_str, original_str)
[ re.sub(r'[a-z]: ', '', x_string) for x_string in content_text_list ]
Out[250]:
['495419K',
 '3D NETWORKS SDN. BHD.',
 '100,291',
 'TA-9-1 LEVEL 9',
 'TOWER A PLAZA 33',
 'NO 1',
 'JALAN KEMAJUAN',
 'SEKSYEN 13 SELANGOR',
 'PETALING JAYA',
 '46200',
 '[NULL]',
 'Selangor',
 '60379403868',
 '60379403888',
 '[email protected]',
 'Services',
 '62010',
 'COMPUTER INDUSTRY',
 '2000-07-11-00.00.00.000000',
 '20',
 '64',
 '64',
 'M',
 'Y']
In [ ]:
 
In [7]:
 
In [14]:
import re
import requests
import pandas as pd
import xml.etree.ElementTree as ET


xml_list = ['https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cokwr', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cpzh4', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cre1l', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/chk2m', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/ciyn3', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/ckd7g', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/clrrx', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cyevm', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cztg3', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/d180g', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/d2mkx', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cssly', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cu76f', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cvlqs', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cx0b9', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/d9ney', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/db1zf', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/dcgjs', 'https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/ddv49']

data_list = []
for url in xml_list:
    print('Processing...', url)
    
    # Read XML from URL...
    xml_response = requests.get(url, timeout=5)
    my_xml_root = ET.fromstring(xml_response.text)
    # ------------------------
    
    # Get all children...
    all_children = list(my_xml_root)
    
    # the items we wanted are in the fifth child...
    all_children[4].tag
    all_children[4].attrib

    content_text = all_children[4].text
    content_text_list = content_text.split(', ')
    
    # Template: re.sub(regex_pattern, new_str, original_str)
    content = [ re.sub(r'[a-z]: ', '', x_string) for x_string in content_text_list ]
    
    data_list.append(content)

    # break

print('Finished....')

# Generate the df...
data_list_df = pd.DataFrame(data_list)
data_list_df
Processing... https://spreadsheets.google.com/feeds/list/17beU3LK_ZJEp4m_WKb2D8xDyX-6_MmmgU8BAnWf4NM0/o8b7v7h/public/values/cokwr
Finished....
Out[14]:
0 1 2 3 4 5 6 7 8 9 ... 13 14 15 16 17 18 19 20 21 22
0 877335W 1 SECURITY SERVICES SDN. BHD. 107,506 NO 65A-1 LORONG HARUAN 5/3 OAKLAND COMMERCIAL SQUARE NEGERI SEMBILAN SEREMBAN 70300 [NULL] ... [email protected] Services 80100 SECURITY FIRMS 2011-02-24-00.00.00.000000 30 55 55 M Y

1 rows × 23 columns

In [61]:
 
In [ ]:
 

XML 3

In [81]:
url = 'https://www.w3schools.com/xml/plant_catalog.xml'

# Read XML from URL...
xml_response = requests.get(url, timeout=5)
my_xml_root = ET.fromstring(xml_response.text)

# Get all children...
all_children = list(my_xml_root)

# Check tag, attribute and text of first child...
all_children[0][0].tag, all_children[0][0].attrib, all_children[0][0].text

# Loop to see all tag, attribute and text of first child...
for x in all_children[0]:
    print(x.tag, x.attrib, x.text)
    
# Now we can collect the records into list/dictionary to construct a friend dataframe structure
data_list = []
for child in all_children:
    data_dict = {}
    
    data_dict['Common'] = child.find('COMMON').text
    data_dict['Botanical'] = child.find('BOTANICAL').text
    data_dict['Zone'] = child.find('ZONE').text
    data_dict['Light'] = child.find('LIGHT').text
    data_dict['Price'] = child.find('PRICE').text
    data_dict['Availability'] = child.find('AVAILABILITY').text
    
    data_list.append(data_dict)
# =========================================

# Generate the df...
data_list_df = pd.DataFrame(data_list)
data_list_df
    
COMMON {} Bloodroot
BOTANICAL {} Sanguinaria canadensis
ZONE {} 4
LIGHT {} Mostly Shady
PRICE {} $2.44
AVAILABILITY {} 031599
Out[81]:
Common Botanical Zone Light Price Availability
0 Bloodroot Sanguinaria canadensis 4 Mostly Shady $2.44 031599
1 Columbine Aquilegia canadensis 3 Mostly Shady $9.37 030699
2 Marsh Marigold Caltha palustris 4 Mostly Sunny $6.81 051799
3 Cowslip Caltha palustris 4 Mostly Shady $9.90 030699
4 Dutchman's-Breeches Dicentra cucullaria 3 Mostly Shady $6.44 012099
5 Ginger, Wild Asarum canadense 3 Mostly Shady $9.03 041899
6 Hepatica Hepatica americana 4 Mostly Shady $4.45 012699
7 Liverleaf Hepatica americana 4 Mostly Shady $3.99 010299
8 Jack-In-The-Pulpit Arisaema triphyllum 4 Mostly Shady $3.23 020199
9 Mayapple Podophyllum peltatum 3 Mostly Shady $2.98 060599
10 Phlox, Woodland Phlox divaricata 3 Sun or Shade $2.80 012299
11 Phlox, Blue Phlox divaricata 3 Sun or Shade $5.59 021699
12 Spring-Beauty Claytonia Virginica 7 Mostly Shady $6.59 020199
13 Trillium Trillium grandiflorum 5 Sun or Shade $3.90 042999
14 Wake Robin Trillium grandiflorum 5 Sun or Shade $3.20 022199
15 Violet, Dog-Tooth Erythronium americanum 4 Shade $9.04 020199
16 Trout Lily Erythronium americanum 4 Shade $6.94 032499
17 Adder's-Tongue Erythronium americanum 4 Shade $9.58 041399
18 Anemone Anemone blanda 6 Mostly Shady $8.86 122698
19 Grecian Windflower Anemone blanda 6 Mostly Shady $9.16 071099
20 Bee Balm Monarda didyma 4 Shade $4.59 050399
21 Bergamot Monarda didyma 4 Shade $7.16 042799
22 Black-Eyed Susan Rudbeckia hirta Annual Sunny $9.80 061899
23 Buttercup Ranunculus 4 Shade $2.57 061099
24 Crowfoot Ranunculus 4 Shade $9.34 040399
25 Butterfly Weed Asclepias tuberosa Annual Sunny $2.78 063099
26 Cinquefoil Potentilla Annual Shade $7.06 052599
27 Primrose Oenothera 3 - 5 Sunny $6.56 013099
28 Gentian Gentiana 4 Sun or Shade $7.81 051899
29 Blue Gentian Gentiana 4 Sun or Shade $8.56 050299
30 Jacob's Ladder Polemonium caeruleum Annual Shade $9.26 022199
31 Greek Valerian Polemonium caeruleum Annual Shade $4.36 071499
32 California Poppy Eschscholzia californica Annual Sun $7.89 032799
33 Shooting Star Dodecatheon Annual Mostly Shady $8.60 051399
34 Snakeroot Cimicifuga Annual Shade $5.63 071199
35 Cardinal Flower Lobelia cardinalis 2 Shade $3.02 022299
In [ ]:
 
In [ ]:
 

XML 4

In [90]:
url = 'https://www.w3schools.com/xml/simple.xml'

# Read XML from URL...
xml_response = requests.get(url, timeout=5)
my_xml_root = ET.fromstring(xml_response.text)

# Get all children...
all_children = list(my_xml_root)

# Check tag, attribute and text of first child...
all_children[0][0].tag, all_children[0][0].attrib, all_children[0][0].text

# Loop to see all tag, attribute and text of first child...
for x in all_children[0]:
    print(x.tag, x.attrib, x.text)
    
# Now we can collect the records into list/dictionary to construct a friend dataframe structure
data_list = []
for child in all_children:
    data_dict = {}
    
    data_dict['Name'] = child.find('name').text
    data_dict['Price'] = child.find('price').text
    data_dict['Description'] = child.find('description').text
    data_dict['Calories'] = child.find('calories').text
    
    data_list.append(data_dict)
# =========================================

# Generate the df...
data_list_df = pd.DataFrame(data_list)
data_list_df
name {} Belgian Waffles
price {} $5.95
description {} Two of our famous Belgian Waffles with plenty of real maple syrup
calories {} 650
Out[90]:
Name Price Description Calories
0 Belgian Waffles $5.95 Two of our famous Belgian Waffles with plenty ... 650
1 Strawberry Belgian Waffles $7.95 Light Belgian waffles covered with strawberrie... 900
2 Berry-Berry Belgian Waffles $8.95 Light Belgian waffles covered with an assortme... 900
3 French Toast $4.50 Thick slices made from our homemade sourdough ... 600
4 Homestyle Breakfast $6.95 Two eggs, bacon or sausage, toast, and our eve... 950
In [ ]:
 

XML 5

In [95]:
xml_data = '''<?xml version="1.0"?>
<catalog>
   <book id="bk101">
      <author>Gambardella, Matthew</author>
      <title>XML Developer's Guide</title>
      <genre>Computer</genre>
      <price>44.95</price>
      <publish_date>2000-10-01</publish_date>
      <description>An in-depth look at creating applications 
      with XML.</description>
   </book>
   <book id="bk102">
      <author>Ralls, Kim</author>
      <title>Midnight Rain</title>
      <genre>Fantasy</genre>
      <price>5.95</price>
      <publish_date>2000-12-16</publish_date>
      <description>A former architect battles corporate zombies, 
      an evil sorceress, and her own childhood to become queen 
      of the world.</description>
   </book>
   <book id="bk103">
      <author>Corets, Eva</author>
      <title>Maeve Ascendant</title>
      <genre>Fantasy</genre>
      <price>5.95</price>
      <publish_date>2000-11-17</publish_date>
      <description>After the collapse of a nanotechnology 
      society in England, the young survivors lay the 
      foundation for a new society.</description>
   </book>
   <book id="bk104">
      <author>Corets, Eva</author>
      <title>Oberon's Legacy</title>
      <genre>Fantasy</genre>
      <price>5.95</price>
      <publish_date>2001-03-10</publish_date>
      <description>In post-apocalypse England, the mysterious 
      agent known only as Oberon helps to create a new life 
      for the inhabitants of London. Sequel to Maeve 
      Ascendant.</description>
   </book>
   <book id="bk105">
      <author>Corets, Eva</author>
      <title>The Sundered Grail</title>
      <genre>Fantasy</genre>
      <price>5.95</price>
      <publish_date>2001-09-10</publish_date>
      <description>The two daughters of Maeve, half-sisters, 
      battle one another for control of England. Sequel to 
      Oberon's Legacy.</description>
   </book>
   <book id="bk106">
      <author>Randall, Cynthia</author>
      <title>Lover Birds</title>
      <genre>Romance</genre>
      <price>4.95</price>
      <publish_date>2000-09-02</publish_date>
      <description>When Carla meets Paul at an ornithology 
      conference, tempers fly as feathers get ruffled.</description>
   </book>
   <book id="bk107">
      <author>Thurman, Paula</author>
      <title>Splish Splash</title>
      <genre>Romance</genre>
      <price>4.95</price>
      <publish_date>2000-11-02</publish_date>
      <description>A deep sea diver finds true love twenty 
      thousand leagues beneath the sea.</description>
   </book>
   <book id="bk108">
      <author>Knorr, Stefan</author>
      <title>Creepy Crawlies</title>
      <genre>Horror</genre>
      <price>4.95</price>
      <publish_date>2000-12-06</publish_date>
      <description>An anthology of horror stories about roaches,
      centipedes, scorpions  and other insects.</description>
   </book>
   <book id="bk109">
      <author>Kress, Peter</author>
      <title>Paradox Lost</title>
      <genre>Science Fiction</genre>
      <price>6.95</price>
      <publish_date>2000-11-02</publish_date>
      <description>After an inadvertant trip through a Heisenberg
      Uncertainty Device, James Salway discovers the problems 
      of being quantum.</description>
   </book>
   <book id="bk110">
      <author>O'Brien, Tim</author>
      <title>Microsoft .NET: The Programming Bible</title>
      <genre>Computer</genre>
      <price>36.95</price>
      <publish_date>2000-12-09</publish_date>
      <description>Microsoft's .NET initiative is explored in 
      detail in this deep programmer's reference.</description>
   </book>
   <book id="bk111">
      <author>O'Brien, Tim</author>
      <title>MSXML3: A Comprehensive Guide</title>
      <genre>Computer</genre>
      <price>36.95</price>
      <publish_date>2000-12-01</publish_date>
      <description>The Microsoft MSXML3 parser is covered in 
      detail, with attention to XML DOM interfaces, XSLT processing, 
      SAX and more.</description>
   </book>
   <book id="bk112">
      <author>Galos, Mike</author>
      <title>Visual Studio 7: A Comprehensive Guide</title>
      <genre>Computer</genre>
      <price>49.95</price>
      <publish_date>2001-04-16</publish_date>
      <description>Microsoft Visual Studio 7 is explored in depth,
      looking at how Visual Basic, Visual C++, C#, and ASP+ are 
      integrated into a comprehensive development 
      environment.</description>
   </book>
</catalog>'''
In [97]:
url = 'https://docs.microsoft.com/en-us/previous-versions/windows/desktop/ms762271(v=vs.85)' # This url isn't an XML page, so we will read from str or file.


# Read XML from URL...
# xml_response = requests.get(url, timeout=5)
# my_xml_root = ET.fromstring(xml_response.text)

# Read XML from str...
my_xml_root = ET.fromstring(xml_data) # to read from file: ET.parse('sample.xml').getroot()


# Get all children...
all_children = list(my_xml_root)

# Check tag, attribute and text of first child...
all_children[0][0].tag, all_children[0][0].attrib, all_children[0][0].text

# Loop to see all tag, attribute and text of first child...
for x in all_children[0]:
    print(x.tag, x.attrib, x.text)
    
# Now we can collect the records into list/dictionary to construct a friend dataframe structure
data_list = []
for child in all_children:
    data_dict = {}
    
    data_dict['Author'] = child.find('author').text
    data_dict['Title'] = child.find('title').text
    data_dict['Genre'] = child.find('genre').text
    data_dict['Price'] = child.find('price').text
    data_dict['Publish Date'] = child.find('publish_date').text
    data_dict['Description'] = child.find('description').text
    
    
    data_list.append(data_dict)
# =========================================

# Generate the df...
data_list_df = pd.DataFrame(data_list)
data_list_df
author {} Gambardella, Matthew
title {} XML Developer's Guide
genre {} Computer
price {} 44.95
publish_date {} 2000-10-01
description {} An in-depth look at creating applications 
      with XML.
Out[97]:
Author Title Genre Price Publish Date Description
0 Gambardella, Matthew XML Developer's Guide Computer 44.95 2000-10-01 An in-depth look at creating applications \n ...
1 Ralls, Kim Midnight Rain Fantasy 5.95 2000-12-16 A former architect battles corporate zombies, ...
2 Corets, Eva Maeve Ascendant Fantasy 5.95 2000-11-17 After the collapse of a nanotechnology \n ...
3 Corets, Eva Oberon's Legacy Fantasy 5.95 2001-03-10 In post-apocalypse England, the mysterious \n ...
4 Corets, Eva The Sundered Grail Fantasy 5.95 2001-09-10 The two daughters of Maeve, half-sisters, \n ...
5 Randall, Cynthia Lover Birds Romance 4.95 2000-09-02 When Carla meets Paul at an ornithology \n ...
6 Thurman, Paula Splish Splash Romance 4.95 2000-11-02 A deep sea diver finds true love twenty \n ...
7 Knorr, Stefan Creepy Crawlies Horror 4.95 2000-12-06 An anthology of horror stories about roaches,\...
8 Kress, Peter Paradox Lost Science Fiction 6.95 2000-11-02 After an inadvertant trip through a Heisenberg...
9 O'Brien, Tim Microsoft .NET: The Programming Bible Computer 36.95 2000-12-09 Microsoft's .NET initiative is explored in \n ...
10 O'Brien, Tim MSXML3: A Comprehensive Guide Computer 36.95 2000-12-01 The Microsoft MSXML3 parser is covered in \n ...
11 Galos, Mike Visual Studio 7: A Comprehensive Guide Computer 49.95 2001-04-16 Microsoft Visual Studio 7 is explored in depth...
In [ ]:
 
In [ ]: